80#define DEBUG_TYPE "dagcombine"
92 cl::desc(
"Enable DAG combiner's use of IR alias analysis"));
96 cl::desc(
"Enable DAG combiner's use of TBAA"));
101 cl::desc(
"Only use DAG-combiner alias analysis in this"
109 cl::desc(
"Bypass the profitability model of load slicing"),
114 cl::desc(
"DAG combiner may split indexing from loads"));
118 cl::desc(
"DAG combiner enable merging multiple stores "
119 "into a wider store"));
123 cl::desc(
"Limit the number of operands to inline for Token Factors"));
127 cl::desc(
"Limit the number of times for the same StoreNode and RootNode "
128 "to bail out in store merging dependence check"));
132 cl::desc(
"DAG combiner enable reducing the width of load/op/store "
137 cl::desc(
"DAG combiner enable load/<replace bytes>/store with "
138 "a narrower store"));
148 bool LegalDAG =
false;
149 bool LegalOperations =
false;
150 bool LegalTypes =
false;
152 bool DisableGenericCombines;
209 while (!PruningList.empty()) {
210 auto *
N = PruningList.pop_back_val();
212 recursivelyDeleteUnusedNodes(
N);
222 while (!
N && !Worklist.empty()) {
223 N = Worklist.pop_back_val();
230 "Found a worklist entry without a corresponding map entry!");
240 : DAG(
D), TLI(
D.getTargetLoweringInfo()),
241 STI(
D.getSubtarget().getSelectionDAGInfo()), OptLevel(
OL), AA(AA) {
245 MaximumLegalStoreInBits = 0;
248 for (
MVT VT :
MVT::all_valuetypes())
250 TLI.isTypeLegal(
EVT(VT)) &&
251 VT.getSizeInBits().getKnownMinSize() >= MaximumLegalStoreInBits)
252 MaximumLegalStoreInBits = VT.getSizeInBits().getKnownMinSize();
257 PruningList.insert(
N);
262 void AddToWorklist(
SDNode *
N) {
264 "Deleted Node added to Worklist");
273 if (WorklistMap.insert(std::make_pair(
N, Worklist.size())).second)
274 Worklist.push_back(
N);
279 CombinedNodes.erase(
N);
280 PruningList.remove(
N);
281 StoreRootCountMap.erase(
N);
283 auto It = WorklistMap.find(
N);
284 if (It == WorklistMap.end())
288 Worklist[It->second] =
nullptr;
289 WorklistMap.erase(It);
293 bool recursivelyDeleteUnusedNodes(
SDNode *
N);
301 return CombineTo(
N, &Res, 1,
AddTo);
308 return CombineTo(
N, To, 2,
AddTo);
314 unsigned MaximumLegalStoreInBits;
319 bool SimplifyDemandedBits(
SDValue Op) {
320 unsigned BitWidth =
Op.getScalarValueSizeInBits();
332 AddToWorklist(
Op.getNode());
334 CommitTargetLoweringOpt(
TLO);
341 bool SimplifyDemandedVectorElts(
SDValue Op) {
343 if (
Op.getValueType().isScalableVector())
346 unsigned NumElts =
Op.getValueType().getVectorNumElements();
522 bool reassociationCanBreakAddressingModePattern(
unsigned Opc,
641 int64_t OffsetFromBase;
644 : MemNode(
N), OffsetFromBase(
Offset) {}
653 return StoreSource::Constant;
656 return StoreSource::Extract;
658 return StoreSource::Load;
660 return StoreSource::Unknown;
777 assert(
LHSTy.isInteger() &&
"Shift amount is not an integer type!");
783 bool isTypeLegal(
const EVT &VT) {
784 if (!LegalTypes)
return true;
789 EVT getSetCCResultType(
EVT VT)
const {
804 explicit WorklistRemover(DAGCombiner &
dc)
808 DC.removeFromWorklist(
N);
816 explicit WorklistInserter(DAGCombiner &
dc)
821 void NodeInserted(
SDNode *
N)
override { DC.ConsiderForPruning(
N); }
831 ((DAGCombiner*)
DC)->AddToWorklist(
N);
836 return ((DAGCombiner*)
DC)->CombineTo(
N, &To[0], To.size(),
AddTo);
841 return ((DAGCombiner*)
DC)->CombineTo(
N, Res,
AddTo);
851 return ((DAGCombiner*)
DC)->recursivelyDeleteUnusedNodes(
N);
856 return ((DAGCombiner*)
DC)->CommitTargetLoweringOpt(
TLO);
863void DAGCombiner::deleteAndRecombine(
SDNode *
N) {
873 AddToWorklist(Op.getNode());
882 unsigned Bits =
Offset + std::max(LHS.getBitWidth(), RHS.getBitWidth());
883 LHS = LHS.zextOrSelf(Bits);
884 RHS = RHS.zextOrSelf(Bits);
895 LHS =
N.getOperand(0);
896 RHS =
N.getOperand(1);
927bool DAGCombiner::isOneUseSetCC(
SDValue N)
const {
966 return !(Const->isOpaque() &&
NoOpaques);
969 unsigned BitWidth =
N.getScalarValueSizeInBits();
970 for (
const SDValue &Op :
N->op_values()) {
974 if (!Const || Const->getAPIntValue().getBitWidth() !=
BitWidth ||
997bool DAGCombiner::reassociationCanBreakAddressingModePattern(
unsigned Opc,
1038 unsigned AS =
LoadStore->getAddressSpace();
1055SDValue DAGCombiner::reassociateOpsCommutative(
unsigned Opc,
const SDLoc &
DL,
1090 N1.getValueType().isFloatingPoint())
1091 if (!Flags.hasAllowReassociation() || !Flags.hasNoSignedZeros())
1103 assert(
N->getNumValues() ==
NumTo &&
"Broken CombineTo call!");
1107 dbgs() <<
" and " <<
NumTo - 1 <<
" other values\n");
1108 for (
unsigned i = 0, e =
NumTo; i !=
e; ++i)
1109 assert((!To[i].getNode() ||
1110 N->getValueType(i) == To[i].getValueType()) &&
1111 "Cannot combine value to value of different type!");
1113 WorklistRemover DeadNodes(*
this);
1117 for (
unsigned i = 0, e =
NumTo; i !=
e; ++i) {
1118 if (To[i].getNode()) {
1119 AddToWorklist(To[i].getNode());
1138 dbgs() <<
"\nWith: ";
TLO.New.getNode()->dump(&DAG);
1143 WorklistRemover DeadNodes(*
this);
1152 if (
TLO.Old.getNode()->use_empty())
1168 AddToWorklist(
Op.getNode());
1170 CommitTargetLoweringOpt(
TLO);
1177bool DAGCombiner::SimplifyDemandedVectorElts(
SDValue Op,
1187 AddToWorklist(
Op.getNode());
1189 CommitTargetLoweringOpt(
TLO);
1195 EVT VT =
Load->getValueType(0);
1200 WorklistRemover DeadNodes(*
this);
1204 AddToWorklist(Trunc.
getNode());
1214 :
LD->getExtensionType();
1217 LD->getChain(),
LD->getBasePtr(),
1221 unsigned Opc =
Op.getOpcode();
1251 if (!
NewOp.getNode())
1253 AddToWorklist(
NewOp.getNode());
1266 if (!
NewOp.getNode())
1268 AddToWorklist(
NewOp.getNode());
1279 if (!LegalOperations)
1282 EVT VT =
Op.getValueType();
1288 unsigned Opc =
Op.getOpcode();
1296 assert(
PVT != VT &&
"Don't know what type to promote to!");
1321 CombineTo(
Op.getNode(),
RV);
1331 AddToWorklist(
NN0.getNode());
1335 AddToWorklist(
NN1.getNode());
1347 if (!LegalOperations)
1350 EVT VT =
Op.getValueType();
1356 unsigned Opc =
Op.getOpcode();
1364 assert(
PVT != VT &&
"Don't know what type to promote to!");
1396 if (!LegalOperations)
1399 EVT VT =
Op.getValueType();
1405 unsigned Opc =
Op.getOpcode();
1413 assert(
PVT != VT &&
"Don't know what type to promote to!");
1423bool DAGCombiner::PromoteLoad(
SDValue Op) {
1424 if (!LegalOperations)
1430 EVT VT =
Op.getValueType();
1436 unsigned Opc =
Op.getOpcode();
1444 assert(
PVT != VT &&
"Don't know what type to promote to!");
1451 :
LD->getExtensionType();
1453 LD->getChain(),
LD->getBasePtr(),
1458 Result.getNode()->dump(&DAG);
dbgs() <<
'\n');
1459 WorklistRemover DeadNodes(*
this);
1463 AddToWorklist(
Result.getNode());
1475bool DAGCombiner::recursivelyDeleteUnusedNodes(
SDNode *
N) {
1476 if (!
N->use_empty())
1482 N = Nodes.pop_back_val();
1486 if (
N->use_empty()) {
1488 Nodes.insert(
ChildN.getNode());
1495 }
while (!Nodes.empty());
1514 AddToWorklist(&
Node);
1526 if (recursivelyDeleteUnusedNodes(
N))
1529 WorklistRemover DeadNodes(*
this);
1549 CombinedNodes.insert(
N);
1552 AddToWorklist(
ChildN.getNode());
1565 if (
RV.getNode() ==
N)
1570 "Node was deleted but visit returned new node!");
1574 if (
N->getNumValues() ==
RV.getNode()->getNumValues())
1577 assert(
N->getValueType(0) ==
RV.getValueType() &&
1578 N->getNumValues() == 1 &&
"Type mismatch");
1588 AddToWorklist(
RV.getNode());
1596 recursivelyDeleteUnusedNodes(
N);
1605 switch (
N->getOpcode()) {
1741#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...) case ISD::SDOPC:
1742#include "llvm/IR/VPIntrinsics.def"
1750 if (!DisableGenericCombines)
1754 if (!
RV.getNode()) {
1756 "Node was deleted but visit returned NULL!");
1770 if (!
RV.getNode()) {
1771 switch (
N->getOpcode()) {
1801 N->getNumValues() == 1) {
1821 if (
unsigned NumOps =
N->getNumOperands()) {
1822 if (
N->getOperand(0).getValueType() ==
MVT::Other)
1823 return N->getOperand(0);
1825 return N->getOperand(
NumOps-1);
1826 for (
unsigned i = 1; i <
NumOps-1; ++i)
1827 if (
N->getOperand(i).getValueType() ==
MVT::Other)
1828 return N->getOperand(i);
1836 if (
N->getNumOperands() == 2) {
1855 AddToWorklist(*(
N->use_begin()));
1867 for (
unsigned i = 0; i <
TFs.size(); ++i) {
1872 for (
unsigned j = i;
j <
TFs.size();
j++)
1873 Ops.emplace_back(
TFs[
j], 0);
1882 for (
const SDValue &Op : TF->op_values()) {
1883 switch (
Op.getOpcode()) {
1893 TFs.push_back(
Op.getNode());
1901 if (SeenOps.insert(
Op.getNode()).second)
1912 for (
unsigned i = 1, e =
TFs.size(); i <
e; i++)
1913 AddToWorklist(
TFs[i]);
1928 for (
const SDValue &Op : Ops) {
1936 if (SeenOps.contains(Op)) {
1943 "expected to find TokenFactor Operand");
1945 for (
unsigned i =
CurIdx + 1; i < Worklist.size(); ++i) {
1957 Worklist.push_back(std::make_pair(Op,
OpNumber));
1961 for (
unsigned i = 0; i < Worklist.size() && i < 1024; ++i) {
1965 auto CurNode = Worklist[i].first;
1968 "Node should not appear in worklist");
1969 switch (CurNode->getOpcode()) {
1978 for (
const SDValue &Op : CurNode->op_values())
1985 AddToWorklist(i, CurNode->getOperand(0).getNode(),
CurOpNumber);
1989 AddToWorklist(i, MemNode->getChain().getNode(),
CurOpNumber);
2007 for (
const SDValue &Op : Ops) {
2023 WorklistRemover DeadNodes(*
this);
2033 for (
unsigned i = 0, e =
N->getNumOperands(); i !=
e; ++i)
2034 Ops.push_back(
N->getOperand(i));
2036 }
while (!
N->use_empty());
2045 return Const !=
nullptr && !Const->isOpaque() ? Const :
nullptr;
2056 if (LD->isIndexed() || LD->getBasePtr().getNode() !=
N)
2058 VT = LD->getMemoryVT();
2059 AS = LD->getAddressSpace();
2061 if (ST->isIndexed() || ST->getBasePtr().getNode() !=
N)
2063 VT = ST->getMemoryVT();
2064 AS = ST->getAddressSpace();
2066 if (LD->isIndexed() || LD->getBasePtr().getNode() !=
N)
2068 VT = LD->getMemoryVT();
2069 AS = LD->getAddressSpace();
2071 if (ST->isIndexed() || ST->getBasePtr().getNode() !=
N)
2073 VT = ST->getMemoryVT();
2074 AS = ST->getAddressSpace();
2088 }
else if (
N->getOpcode() ==
ISD::SUB) {
2121 unsigned Opcode =
N->getOpcode();
2122 EVT VT =
N->getValueType(0);
2134 return C->isZero() &&
C->isNegative();
2136 return C->isZero() && !
C->isNegative();
2139 return C->isExactlyValue(1.0);
2164 "Unexpected binary operator");
2242 "Expecting add or sub");
2247 bool IsAdd =
N->getOpcode() ==
ISD::ADD;
2248 SDValue C = IsAdd ?
N->getOperand(1) :
N->getOperand(0);
2249 SDValue Z = IsAdd ?
N->getOperand(0) :
N->getOperand(1);
2256 Z.getOperand(0).getValueType() !=
MVT::i1)
2271 EVT VT =
C.getValueType();
2283 "Expecting add or sub");
2287 bool IsAdd =
N->getOpcode() ==
ISD::ADD;
2363 assert(Sub &&
"Constant folding failed");
2372 assert(
Add &&
"Constant folding failed");
2383 if ((!LegalOperations ||
2386 X.getScalarValueSizeInBits() == 1) {
2398 {N1, N0.getOperand(1)}))
2407 if (!reassociationCanBreakAddressingModePattern(
ISD::ADD,
DL, N0,
N1)) {
2437 if (
N1.getOpcode() ==
ISD::SUB && N0 ==
N1.getOperand(1))
2438 return N1.getOperand(0);
2458 N0 ==
N1.getOperand(1).getOperand(0))
2460 N1.getOperand(1).getOperand(1));
2464 N0 ==
N1.getOperand(1).getOperand(1))
2466 N1.getOperand(1).getOperand(0));
2471 N0 ==
N1.getOperand(0).getOperand(1))
2472 return DAG.
getNode(
N1.getOpcode(),
DL, VT,
N1.getOperand(0).getOperand(0),
2491 return (!Max && !Op) ||
2492 (
Max &&
Op &&
Max->getAPIntValue() == (-
Op->getAPIntValue()));
2576 const APInt &
C1 =
N1->getConstantOperandAPInt(0);
2585 const APInt &
VS1 =
N1->getConstantOperandAPInt(0);
2594 const APInt &
C1 =
N1->getConstantOperandAPInt(0);
2604 const APInt &
SV1 =
N1->getConstantOperandAPInt(0);
2614 unsigned Opcode =
N->getOpcode();
2655 bool Masked =
false;
2660 V = V.getOperand(0);
2666 V = V.getOperand(0);
2674 if (V.getResNo() != 1)
2681 EVT VT = V.getNode()->getValueType(0);
2724 N1.getOperand(0).getOperand(1),
2781 N0,
N1.getOperand(0),
N1.getOperand(2));
2800 if (!
N->hasAnyUseOfValue(1))
2843 EVT VT = V.getValueType();
2851 IsFlip = Const->isAllOnes();
2854 IsFlip = (Const->getAPIntValue() & 0x01) == 1;
2859 return V.getOperand(0);
2875 if (!
N->hasAnyUseOfValue(1))
2882 return DAG.
getNode(
N->getOpcode(),
DL,
N->getVTList(),
N1, N0);
2969 if (!LegalOperations ||
3008 if (!LegalOperations ||
3133 unsigned Opcode =
Carry0.getOpcode();
3134 if (Opcode !=
Carry1.getOpcode())
3189 return Merged.getValue(1);
3237 "Illegal truncation");
3245 DstVT.getScalarSizeInBits());
3251 DstVT.getScalarSizeInBits()),
3370 if (
N->getFlags().hasNoUnsignedWrap())
3376 if (
N->getFlags().hasNoSignedWrap())
3410 if (
N1.getOpcode() ==
ISD::SUB && N0 ==
N1.getOperand(0))
3411 return N1.getOperand(1);
3504 if (
N1.getOperand(0).getOpcode() ==
ISD::SUB &&
3507 N1.getOperand(0).getOperand(1),
3511 if (
N1.getOperand(1).getOpcode() ==
ISD::SUB &&
3515 N1.getOperand(1).getOperand(1));
3585 N1.getOperand(0).getScalarValueSizeInBits() == 1 &&
3614 if (
GA->getGlobal() ==
GB->getGlobal())
3644 if (!LegalOperations &&
N1.getOpcode() ==
ISD::SRL &&
N1.hasOneUse()) {
3648 ShAmtC->getAPIntValue() == (
N1.getScalarValueSizeInBits() - 1)) {
3710 if (!
N->hasAnyUseOfValue(1))
3741 if (!
N->hasAnyUseOfValue(1))
3753 if (IsSigned &&
N1C && !
N1C->getAPIntValue().isMinSignedValue()) {
3789 if (!LegalOperations ||
3804 if (!LegalOperations ||
3866 "Splat APInt should be element width");
3941 if ((
MulC - 1).isPowerOf2())
3943 else if ((
MulC + 1).isPowerOf2())
3951 "multiply-by-constant generated out of bounds shift");
3987 N1.getNode()->hasOneUse()) {
4034 if (!V || V->isZero()) {
4067 EVT NodeType =
Node->getValueType(0);
4068 if (!NodeType.isSimple())
4070 switch (NodeType.getSimpleVT().SimpleTy) {
4071 default:
return false;
4072 case MVT::i8:
LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
break;
4073 case MVT::i16:
LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
break;
4074 case MVT::i32:
LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
break;
4075 case MVT::i64:
LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64;
break;
4076 case MVT::i128:
LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128;
break;
4084 if (
Node->use_empty())
4087 unsigned Opcode =
Node->getOpcode();
4092 EVT VT =
Node->getValueType(0);
4143 CombineTo(
User, combined);
4154 EVT VT =
N->getValueType(0);
4157 unsigned Opc =
N->getOpcode();
4166 if (DAG.
isUndef(Opc, {N0, N1}))
4177 if (
N0C &&
N0C->isZero())
4200 EVT VT =
N->getValueType(0);
4215 if (
N1C &&
N1C->isAllOnes())
4219 if (
N1C &&
N1C->getAPIntValue().isMinSignedValue())
4242 AddToWorklist(
Mul.getNode());
4262 EVT VT =
N->getValueType(0);
4269 if (
C->isZero() ||
C->isOpaque())
4271 if (
C->getAPIntValue().isPowerOf2())
4273 if (
C->getAPIntValue().isNegatedPowerOf2())
4299 AddToWorklist(
Sign.getNode());
4303 AddToWorklist(
Srl.getNode());
4305 AddToWorklist(
Add.getNode());
4307 AddToWorklist(
Sra.getNode());
4344 EVT VT =
N->getValueType(0);
4359 if (
N1C &&
N1C->isAllOnes())
4377 AddToWorklist(
Mul.getNode());
4397 EVT VT =
N->getValueType(0);
4407 AddToWorklist(Trunc.
getNode());
4421 AddToWorklist(Trunc.
getNode());
4423 AddToWorklist(
Add.getNode());
4440 unsigned Opcode =
N->getOpcode();
4443 EVT VT =
N->getValueType(0);
4455 if (!isSigned &&
N1C &&
N1C->isAllOnes())
4475 AddToWorklist(
Add.getNode());
4483 AddToWorklist(
Add.getNode());
4509 AddToWorklist(
Mul.getNode());
4516 return DivRem.getValue(1);
4524 EVT VT =
N->getValueType(0);
4584 EVT VT =
N->getValueType(0);
4664 if (!
HiExists && (!LegalOperations ||
4667 return CombineTo(
N, Res, Res);
4672 if (!
LoExists && (!LegalOperations ||
4675 return CombineTo(
N, Res, Res);
4685 AddToWorklist(
Lo.getNode());
4687 if (
LoOpt.getNode() &&
LoOpt.getNode() !=
Lo.getNode() &&
4688 (!LegalOperations ||
4695 AddToWorklist(
Hi.getNode());
4698 (!LegalOperations ||
4710 EVT VT =
N->getValueType(0);
4730 return CombineTo(
N, Lo, Hi);
4741 EVT VT =
N->getValueType(0);
4747 return CombineTo(
N, Zero, Zero);
4753 return CombineTo(
N,
N->getOperand(0), Zero);
4773 return CombineTo(
N, Lo, Hi);
4798 IsSigned ?
N0C->getAPIntValue().smul_ov(
N1C->getAPIntValue(),
Overflow)
4807 return DAG.
getNode(
N->getOpcode(),
DL,
N->getVTList(),
N1, N0);
4815 if (
N1C &&
N1C->getAPIntValue() == 2)
4817 N->getVTList(), N0, N0);
4823 return CombineTo(
N,
And,
4872 if (
C1.getBitWidth() <
C2.getBitWidth() ||
4873 C1 !=
C2.sextOrSelf(
C1.getBitWidth()))
4948 EVT FPVT =
Fp.getOperand(0).getValueType();
4950 if (
FPVT.isVector())
4952 FPVT.getVectorElementCount());
4979 if (!(
C1 + 1).isPowerOf2() ||
C1.getBitWidth() <
C3.getBitWidth() ||
4980 C1 !=
C3.zextOrSelf(
C1.getBitWidth()))
4983 unsigned BW = (
C1 + 1).exactLogBase2();
4986 if (
FPVT.isVector())
4988 FPVT.getVectorElementCount());
5003 unsigned Opcode =
N->getOpcode();
5082 if (
XVT !=
Y.getValueType())
5086 if ((VT.
isVector() || LegalOperations) &&
5106 if (
XVT !=
Y.getValueType())
5152 if (
XVT.isInteger() &&
XVT ==
Y.getValueType() &&
5175 assert(
X.getValueType() ==
Y.getValueType() &&
5176 "Inputs to shuffles are not the same type");
5182 if (!
SVN0->hasOneUse() || !
SVN1->hasOneUse() ||
5183 !
SVN0->getMask().equals(
SVN1->getMask()))
5225 "Unexpected operand types for bitwise logic op");
5228 "Unexpected operand types for setcc");
5263 AddToWorklist(
Or.getNode());
5282 AddToWorklist(
And.getNode());
5296 AddToWorklist(
Add.getNode());
5320 if (
LL == RL &&
C0 &&
C1 && !
C0->isOpaque() && !
C1->isOpaque()) {
5343 if (
LL ==
RR && LR == RL) {
5350 if (
LL == RL && LR ==
RR) {
5354 (!LegalOperations ||
5368 EVT VT =
N1.getValueType();
5389 if (
ADDC.getMinSignedBits() <= 64 &&
5393 SRLC.getZExtValue());
5418 const APInt &AndMask =
CAnd->getAPIntValue();
5465 if (!
AndC->getAPIntValue().isMask())
5474 (!LegalOperations ||
5482 if (!
LoadN->isSimple())
5490 if (LegalOperations &&
5511 if (!
MemVT.isRound())
5515 if (!
LDST->isSimple())
5531 assert(ShAmt % 8 == 0 &&
"ShAmt is byte offset");
5537 LDST->getMemOperand()->getFlags()))
5553 if (LegalOperations &&
5562 if (
Load->getNumValues() > 2)
5569 Load->getMemoryVT().getSizeInBits() <
MemVT.getSizeInBits() + ShAmt)
5578 if (
Store->getMemoryVT().getSizeInBits() <
MemVT.getSizeInBits() + ShAmt)
5581 if (LegalOperations &&
5588bool DAGCombiner::SearchForAndLoads(
SDNode *
N,
5595 for (
SDValue Op :
N->op_values()) {
5596 if (
Op.getValueType().isVector())
5602 (
Mask->getAPIntValue() &
C->getAPIntValue()) !=
C->getAPIntValue())
5607 if (!
Op.hasOneUse())
5610 switch(
Op.getOpcode()) {
5624 Loads.push_back(Load);
5636 Op.getOperand(0).getValueType();
5640 if (
ExtVT.bitsGE(VT))
5661 for (
unsigned i = 0, e =
NodeToMask->getNumValues(); i <
e; ++i) {
5677bool DAGCombiner::BackwardsPropagateMask(
SDNode *
N) {
5682 if (!
Mask->getAPIntValue().isMask())
5693 if (Loads.size() == 0)
5707 if (
And.getOpcode() == ISD ::AND)
5726 for (
auto *Load : Loads) {
5731 if (
And.getOpcode() == ISD ::AND)
5736 "Shouldn't be masking the load if it can't be narrowed");
5750SDValue DAGCombiner::unfoldExtremeBitClearingToShifts(
SDNode *
N) {
5776 Y = M->getOperand(1);
5789 EVT VT =
N->getValueType(0);
5806 EVT VT =
And->getValueType(0);
5834 const APInt &ShiftAmt =
Srl.getConstantOperandAPInt(1);
5857 EVT VT =
N1.getValueType();
5874 if (!
XorC || !
XorC->getAPIntValue().isSignMask() ||
5888 EVT VT =
N1.getValueType();
5931 LoadVT.getVectorElementType().getScalarSizeInBits();
5932 if (Splat->getAPIntValue().isMask(ElementSize)) {
5971 return RHS->getAPIntValue().isSubsetOf(LHS->getAPIntValue());
6008 N0 : N0.getOperand(0) );
6018 unsigned SplatBitSize;
6038 SplatBitSize <
EltBitWidth; SplatBitSize = SplatBitSize * 2)
6045 for (
unsigned i = 0,
n = (SplatBitSize /
EltBitWidth); i <
n; ++i)
6055 Load->getValueType(0),
6056 Load->getMemoryVT());
6064 switch (
Load->getExtensionType()) {
6065 default:
B =
false;
break;
6082 Load->getChain(),
Load->getBasePtr(),
6083 Load->getOffset(),
Load->getMemoryVT(),
6084 Load->getMemOperand());
6086 if (
Load->getNumValues() == 3) {
6090 CombineTo(Load, To, 3,
true);
6109 GN0->getBasePtr(),
GN0->getIndex(),
GN0->getScale()};
6165 SubRHS.getOperand(0).getScalarValueSizeInBits() == 1)
6168 SubRHS.getOperand(0).getScalarValueSizeInBits() == 1)
6191 ((!LegalOperations &&
LN0->isSimple()) ||
6230 if (!
C->getAPIntValue().isMask(
6231 LHS.getOperand(0).getValueType().getFixedSizeInBits()))
6251 if (!LegalOperations)
6254 EVT VT =
N->getValueType(0);
6273 if (!
N01C || (
N01C->getZExtValue() != 0xFF00 &&
6274 N01C->getZExtValue() != 0xFFFF))
6281 if (!
N1.getNode()->hasOneUse())
6284 if (!
N11C ||
N11C->getZExtValue() != 0xFF)
6286 N1 =
N1.getOperand(0);
6301 if (
N01C->getZExtValue() != 8 ||
N11C->getZExtValue() != 8)
6307 if (!
N00.getNode()->hasOneUse())
6318 if (!
N10.getNode()->hasOneUse())
6323 if (!
N101C || (
N101C->getZExtValue() != 0xFF00 &&
6324 N101C->getZExtValue() != 0xFFFF))
6368 if (!
N.getNode()->hasOneUse())
6371 unsigned Opc =
N.getOpcode();
6390 switch (
N1C->getZExtValue()) {
6415 if (!
C ||
C->getZExtValue() != 8)
6423 if (!
C ||
C->getZExtValue() != 8)
6432 if (!
C ||
C->getZExtValue() != 8)
6440 if (!
C ||
C->getZExtValue() != 8)
6459 if (!
C ||
C->getAPIntValue() != 16)
6461 Parts[0] = Parts[1] =
N.getOperand(0).getOperand(0).getNode();
6476 "MatchBSwapHWordOrAndAnd: expecting i32");
6488 if (
Mask0->getAPIntValue() != 0xff00ff00 ||
6489 Mask1->getAPIntValue() != 0x00ff00ff)
6517 if (!LegalOperations)
6520 EVT VT =
N->getValueType(0);
6559 if (Parts[0] != Parts[1] || Parts[0] != Parts[2] || Parts[0] != Parts[3])
6581 EVT VT =
N1.getValueType();
6585 if (!LegalOperations && (N0.
isUndef() ||
N1.isUndef()))
6651 EVT VT =
N1.getValueType();
6700 for (
int i = 0; i !=
NumElts; ++i) {
6701 int M0 =
SV0->getMaskElt(i);
6702 int M1 =
SV1->getMaskElt(i);
6721 assert((
M0 >= 0 ||
M1 >= 0) &&
"Undef index!");
6779 return !
C1 || !
C2 ||
C1->getAPIntValue().intersects(
C2->getAPIntValue());
6786 AddToWorklist(
IOR.getNode());
6824 Mask = Op.getOperand(1);
6825 return Op.getOperand(0);
6871 "Existing shift must be valid as a rotate half");
7024 unsigned Bits =
Log2_64(EltSize);
7025 if (
NegC->getAPIntValue().getActiveBits() <= Bits &&
7026 ((
NegC->getAPIntValue() | Known.
Zero).countTrailingOnes() >= Bits)) {
7047 ((
PosC->getAPIntValue() | Known.
Zero).countTrailingOnes() >=
7068 Width =
NegC->getAPIntValue();
7081 Width =
PosC->getAPIntValue() +
NegC->getAPIntValue();
7091 return Width == EltSize;
7195 EVT VT = LHS.getValueType();
7209 LHS.getOperand(0).getValueType() == RHS.getOperand(0).getValueType()) {
7210 assert(LHS.getValueType() == RHS.getValueType());
7282 return (LHS->getAPIntValue() + RHS->getAPIntValue()) ==
EltSizeInBits;
7376struct ByteProvider {
7381 unsigned ByteOffset = 0;
7383 ByteProvider() =
default;
7386 return ByteProvider(Load, ByteOffset);
7389 static ByteProvider
getConstantZero() {
return ByteProvider(
nullptr, 0); }
7399 ByteProvider(
LoadSDNode *Load,
unsigned ByteOffset)
7400 :
Load(
Load), ByteOffset(ByteOffset) {}
7418 bool Root =
false) {
7423 if (!Root && !Op.hasOneUse())
7426 assert(Op.getValueType().isScalarInteger() &&
"can't handle other types");
7427 unsigned BitWidth = Op.getValueSizeInBits();
7434 switch (Op.getOpcode()) {
7443 if (LHS->isConstantZero())
7445 if (RHS->isConstantZero())
7460 ? ByteProvider::getConstantZero()
7484 if (!L->isSimple() || L->isIndexed())
7496 return ByteProvider::getMemory(L, Index);
7522 for (
unsigned i = 0; i < Width; i++) {
7536 switch (
Value.getOpcode()) {
7581 !
N->isSimple() ||
N->isIndexed())
7594 Stores.push_back(Store);
7595 Chain =
Store->getChain();
7598 if (Stores.size() < 2)
7603 unsigned NumStores = Stores.size();
7617 for (
auto Store : Stores) {
7651 WideVal.getScalarValueSizeInBits() >
7687 if (!Allowed || !
Fast)
7694 for (
unsigned i = 0; i != NumStores; ++i)
7698 for (
unsigned i = 0,
j = NumStores - 1; i != NumStores; ++i, --
j)
7707 bool NeedRotate =
false;
7721 "Unexpected store value to merge");
7730 }
else if (NeedRotate) {
7777 "Can only match load combining against OR nodes");
7780 EVT VT =
N->getValueType(0);
7787 assert(
P.isMemory() &&
"Must be a memory byte provider");
7788 unsigned LoadBitWidth =
P.Load->getMemoryVT().getSizeInBits();
7790 "can only analyze providers for individual bytes not bit");
7808 for (
int i =
ByteWidth - 1; i >= 0; --i) {
7813 if (
P->isConstantZero()) {
7820 assert(
P->isMemory() &&
"provenance should either be memory or zero");
7825 "Must be enforced by calculateByteProvider");
7832 else if (Chain !=
LChain)
7855 assert(!Loads.empty() &&
"All the bytes of the value must be loaded from "
7856 "memory, so there must be at least one load which produces the value");
7857 assert(
Base &&
"Base address of the accessed memory location must be set");
7865 if (!
MemVT.isSimple())
7871 if (LegalOperations &&
7880 if (!IsBigEndian.hasValue())
7918 if (!Allowed || !
Fast)
7937 SDLoc(
N), LegalOperations))
7965 EVT VT =
N->getValueType(0);
8093 LHS.getValueType());
8094 if (!LegalOperations ||
8114 recursivelyDeleteUnusedNodes(N0.
getNode());
8130 AddToWorklist(V.getNode());
8142 AddToWorklist(
N00.getNode()); AddToWorklist(
N01.getNode());
8154 AddToWorklist(
N00.getNode()); AddToWorklist(
N01.getNode());
8179 AddToWorklist(
NotX.getNode());
8194 if (
XorC->getAPIntValue() ==
Ones) {
8273 if (!LogicOp.hasOneUse())
8285 assert(
C1Node &&
"Expected a shift with constant operand");
8289 if (V.getOpcode() !=
ShiftOpcode || !V.hasOneUse())
8316 Y = LogicOp.getOperand(1);
8318 Y = LogicOp.getOperand(0);
8362 switch (LHS.getOpcode()) {
8399 EVT VT =
N->getValueType(0);
8415 if (
N->hasOneUse() &&
N->getOperand(0).hasOneUse() &&
8423 AddToWorklist(
Trunc00.getNode());
8424 AddToWorklist(
Trunc01.getNode());
8436 EVT VT =
N->getValueType(0);
8461 return DAG.
getNode(
N->getOpcode(), dl, VT, N0,
Amt);
8486 if (
C1 &&
C2 &&
C1->getValueType(0) ==
C2->getValueType(0)) {
8563 APInt c1 = LHS->getAPIntValue();
8564 APInt c2 = RHS->getAPIntValue();
8573 APInt c1 = LHS->getAPIntValue();
8574 APInt c2 = RHS->getAPIntValue();
8601 APInt c1 = LHS->getAPIntValue();
8602 APInt c2 = RHS->getAPIntValue();
8614 APInt c1 = LHS->getAPIntValue();
8615 APInt c2 = RHS->getAPIntValue();
8640 APInt c1 = LHS->getAPIntValue();
8641 APInt c2 = RHS->getAPIntValue();
8652 AddToWorklist(
NewSHL.getNode());
8695 Mask.lshrInPlace(c1 - c2);
8727 AddToWorklist(
Shl0.getNode());
8728 AddToWorklist(
Shl1.getNode());
8741 if (
N1C && !
N1C->isOpaque())
8758 if (
ShlVal.ult(
C0.getBitWidth())) {
8774 "SRL or SRA node is required here!");
8805 ?
Constant->getAPIntValue().getMinSignedBits()
8806 :
Constant->getAPIntValue().getActiveBits();
8824 "Cannot have a multiply node with two different operand types.");
8888 if (!LegalOperations ||
8908 APInt c1 = LHS->getAPIntValue();
8909 APInt c2 = RHS->getAPIntValue();
8923 "Expected matchBinaryPredicate to return one element for "
8949 int ShiftAmt =
N1C->getZExtValue() -
N01C->getZExtValue();
8955 if ((ShiftAmt > 0) &&
8967 N->getValueType(0), Trunc);
8983 unsigned ShiftAmt =
N1C->getZExtValue();
9024 if (
LargeShift->getAPIntValue() == TruncBits) {
9043 if (
N1C && !
N1C->isOpaque())
9090 APInt c1 = LHS->getAPIntValue();
9091 APInt c2 = RHS->getAPIntValue();
9100 APInt c1 = LHS->getAPIntValue();
9101 APInt c2 = RHS->getAPIntValue();
9158 AddToWorklist(
Mask.getNode());
9167 unsigned BitSize =
SmallVT.getScalarSizeInBits();
9168 if (
N1C->getAPIntValue().uge(BitSize))
9214 unsigned ShAmt =
UnknownBits.countTrailingZeros();
9222 AddToWorklist(
Op.getNode());
9243 if (
N1C && !
N1C->isOpaque())
9268 if (
N->hasOneUse()) {
9289 EVT VT =
N->getValueType(0);
9344 if (LHS && RHS && LHS->isSimple() && RHS->isSimple() &&
9345 LHS->getAddressSpace() == RHS->getAddressSpace() &&
9356 RHS->getMemOperand()->getFlags(), &
Fast) &&
9360 AddToWorklist(
NewPtr.getNode());
9364 RHS->getMemOperand()->getFlags(), RHS->getAAInfo());
9366 WorklistRemover DeadNodes(*
this);
9429 Op0 =
AbsOp1.getOperand(0);
9430 Op1 =
AbsOp1.getOperand(1);
9454 EVT VT =
N->getValueType(0);
9474 EVT VT =
N->getValueType(0);
9498 EVT VT =
N->getValueType(0);
9511 EVT VT =
N->getValueType(0);
9528 EVT VT =
N->getValueType(0);
9538 EVT VT =
N->getValueType(0);
9555 EVT VT =
N->getValueType(0);
9565 EVT VT =
N->getValueType(0);
9579 EVT VT = LHS.getValueType();
9591 if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
9611 return DAG.
getNode(Opcode,
DL, VT, LHS, RHS);
9626 return DAG.
getNode(Opcode,
DL, VT, LHS, RHS);
9643 EVT VT =
N->getValueType(0);
9645 VT !=
Cond.getOperand(0).getValueType())
9675 EVT VT =
N->getValueType(0);
9692 if (
C1->isZero() &&
C2->isOne()) {
9699 if (
C1->isZero() &&
C2->isAllOnes()) {
9706 if (
C1->isOne() &&
C2->isZero()) {
9712 if (
C1->isAllOnes() &&
C2->isZero()) {
9763 if (
CondVT.isInteger() &&
9768 C1->isZero() &&
C2->isOne()) {
9781 "Expected a (v)select");
9784 EVT VT =
N->getValueType(0);
9817 EVT VT =
N->getValueType(0);
9824 if (VT !=
Cond0.getValueType())
9875 EVT VT =
N->getValueType(0);
9916 recursivelyDeleteUnusedNodes(
InnerSelect.getNode());
9929 recursivelyDeleteUnusedNodes(
InnerSelect.getNode());
10002 if (
C &&
NotC &&
C->getAPIntValue() == ~
NotC->getAPIntValue()) {
10022 (!LegalOperations &&
10051 EVT VT =
N->getValueType(0);
10059 if (LHS->getNumOperands() != 2 || RHS->getNumOperands() != 2)
10068 for (
int i = 0; i <
NumElems / 2; ++i) {
10069 if (
Cond->getOperand(i)->isUndef())
10081 if (
Cond->getOperand(i)->isUndef())
10086 else if (
Cond->getOperand(i).getNode() !=
TopHalf)
10091 "One half of the selector was all UNDEFs and the other was all the "
10092 "same value. This should have been addressed before this function.");
10095 BottomHalf->isZero() ? RHS->getOperand(0) : LHS->getOperand(0),
10096 TopHalf->isZero() ? RHS->getOperand(1) : LHS->getOperand(1));
10110 Index = Index.getOperand(1);
10120 SDValue Op = Index.getOperand(0);
10129 SDValue Op = Index.getOperand(0);
10158 MSC->getMemOperand(),
MSC->getIndexType(),
MSC->isTruncatingStore());
10165 MSC->getMemOperand(),
MSC->getIndexType(),
MSC->isTruncatingStore());
10197 Value.getValueType().isInteger() &&
10225 Value.getOperand(0).getValueType());
10252 MGT->getMemoryVT(),
DL, Ops,
10253 MGT->getMemOperand(),
MGT->getIndexType(),
10254 MGT->getExtensionType());
10260 MGT->getMemoryVT(),
DL, Ops,
10261 MGT->getMemOperand(),
MGT->getIndexType(),
10262 MGT->getExtensionType());
10275 return CombineTo(
N,
MLD->getPassThru(),
MLD->getChain());
10282 N->getValueType(0),
SDLoc(
N),
MLD->getChain(),
MLD->getBasePtr(),
10283 MLD->getPointerInfo(),
MLD->getOriginalAlign(),
10285 return CombineTo(
N, NewLd, NewLd.
getValue(1));
10301 EVT VT =
N->getValueType(0);
10302 if (!
Cond.hasOneUse() ||
Cond.getScalarValueSizeInBits() != 1 ||
10314 for (
unsigned i = 0; i !=
Elts; ++i) {
10319 if (
N1Elt.getValueType() !=
N2Elt.getValueType())
10364 EVT VT =
N->getValueType(0);
10385 bool isAbs =
false;
10393 N2 == LHS &&
N1.getOpcode() ==
ISD::SUB && N2 ==
N1.getOperand(1))
10404 AddToWorklist(Shift.
getNode());
10405 AddToWorklist(
Add.getNode());
10433 EVT WideVT =
N1.getValueType().changeVectorElementTypeToInteger();
10439 if (LHS.getOpcode() ==
ISD::LOAD && LHS.hasOneUse() &&
10494 return Cond->getAPIntValue() == ~Op->getAPIntValue();
10533 if (
OpLHS == LHS) {
10549 return (!Op && !
Cond) ||
10551 Cond->getAPIntValue() == (-
Op->getAPIntValue() - 1));
10628 AddToWorklist(
SCC.getNode());
10631 if (!
SCCC->isZero())
10635 }
else if (
SCC->isUndef()) {
10643 SCC.getOperand(1), N2, N3,
SCC.getOperand(2));
10662 N->hasOneUse() &&
N->use_begin()->getOpcode() ==
ISD::BRCOND;
10665 EVT VT =
N->getValueType(0);
10685 bool Updated =
false;
10697 return True || False;
10709 N1 =
N1->getOperand(0);
10761 if (!
N.hasOneUse())
10789 unsigned Opcode =
N->getOpcode();
10791 EVT VT =
N->getValueType(0);
10796 "Expected EXTEND dag node in input!");
10832 unsigned Opcode =
N->getOpcode();
10834 EVT VT =
N->getValueType(0);
10840 &&
"Expected EXTEND dag node in input!");
10846 return DAG.
getNode(Opcode,
DL, VT, N0);
10893 for (
unsigned i = 0; i !=
NumElts; ++i) {
10895 if (Op.isUndef()) {
10929 if (UI.getUse().getResNo() != N0.
getResNo())
10938 for (
unsigned i = 0; i != 2; ++i) {
10985 for (
unsigned j = 0;
j != 2; ++
j) {
10993 Ops.push_back(
SetCC->getOperand(2));
11006 "Unexpected node type (not an extend)!");
11032 !
DstVT.isVector() || !
DstVT.isPow2VectorType() ||
11055 assert(!
DstVT.isScalableVector() &&
"Unexpected scalable vector type");
11060 const unsigned Stride =
SplitSrcVT.getStoreSize();
11072 LN0->getMemOperand()->getFlags(),
LN0->getAAInfo());
11076 Loads.push_back(
SplitLoad.getValue(0));
11077 Chains.push_back(
SplitLoad.getValue(1));
11084 AddToWorklist(
NewChain.getNode());
11086 CombineTo(
N, NewValue);
11101 EVT VT =
N->getValueType(0);
11102 EVT OrigVT =
N->getOperand(0).getValueType();
11146 Load->getChain(),
Load->getBasePtr(),
11147 Load->getMemoryVT(),
Load->getMemOperand());
11165 CombineTo(Load, Trunc,
ExtLoad.getValue(1));
11169 recursivelyDeleteUnusedNodes(N0.
getNode());
11183 "Unexpected opcode for vector select narrowing/widening");
11188 EVT VT =
Cast->getValueType(0);
11223 bool LegalOperations,
SDNode *
N,
11234 if ((LegalOperations || !
LN0->isSimple() ||
11244 if (
LN0->use_empty())
11259 ((LegalOperations || VT.
isVector() ||
11276 LN0->getMemOperand());
11313 VT, dl,
Ld->getChain(),
Ld->getBasePtr(),
Ld->getOffset(),
Ld->getMask(),
11314 PassThru,
Ld->getMemoryVT(),
Ld->getMemOperand(),
Ld->getAddressingMode(),
11321 bool LegalOperations) {
11333 EVT VT =
N->getValueType(0);
11364 EVT VT =
N->getValueType(0);
11371 if (VT.
isVector() && !LegalOperations &&
11424 if (UI.getUse().getResNo() != 0 ||
User == N0.
getNode())
11468 if (
SetCCVT.getScalarSizeInBits() != 1 &&
11480 EVT VT =
N->getValueType(0);
11499 AddToWorklist(
oye);
11507 unsigned OpBits =
Op.getScalarValueSizeInBits();
11578 LN00->getChain(),
LN00->getBasePtr(),
11579 LN00->getMemoryVT(),
11580 LN00->getMemOperand());
11675 Op =
N->getOperand(0);
11681 N.getValueType().getScalarType() !=
MVT::i1 ||
11726 EVT VT =
N->getValueType(0);
11745 APInt(
Op.getScalarValueSizeInBits(), 0) :
11746 APInt::getBitsSet(
Op.getScalarValueSizeInBits(),
11747 N0.getScalarValueSizeInBits(),
11748 std::
min(
Op.getScalarValueSizeInBits(),
11763 AddToWorklist(
oye);
11778 AddToWorklist(
Op.getNode());
11788 AddToWorklist(
Op.getNode());
11858 LN00->getChain(),
LN00->getBasePtr(),
11859 LN00->getMemoryVT(),
11860 LN00->getMemOperand());
11902 if (!LegalOperations && VT.
isVector() &&
11955 InnerZExt.getOperand(0).getValueSizeInBits();
11985 EVT VT =
N->getValueType(0);
12006 AddToWorklist(
oye);
12051 LN0->getChain(),
LN0->getBasePtr(),
12059 recursivelyDeleteUnusedNodes(
LN0);
12079 VT,
LN0->getChain(),
LN0->getBasePtr(),
12083 recursivelyDeleteUnusedNodes(
LN0);
12094 if (VT.
isVector() && !LegalOperations) {
12139 unsigned Opcode =
N->getOpcode();
12159 "Asserting zero/sign-extended bits to a type larger than the "
12160 "truncated destination does not provide information");
12179 "Asserting zero/sign-extended bits to a type larger than the "
12180 "truncated destination does not provide information");
12185 BigA.getOperand(0),
N1);
12203 std::max(AL,
AAN->getAlign()));
12236 unsigned Opc =
N->getOpcode();
12240 EVT VT =
N->getValueType(0);
12250 unsigned ShAmt = 0;
12274 ShAmt =
N1C->getZExtValue();
12287 LN->getExtensionType() != ExtType)
12297 if (
Mask.isMask()) {
12299 }
else if (
Mask.isShiftedMask()) {
12300 ShAmt =
Mask.countTrailingZeros();
12323 if (!
SRL.hasOneUse())
12336 ShAmt =
SRL1C->getZExtValue();
12371 if ((
ExtVT.getScalarSizeInBits() >
MaskedVT.getScalarSizeInBits()) &&
12377 N0 =
SRL.getOperand(0);
12401 if (!
LN0->isSimple() ||
12407 LN0->getMemoryVT().getStoreSizeInBits().getFixedSize();
12425 AddToWorklist(
NewPtr.getNode());
12431 LN0->getMemOperand()->getFlags(),
LN0->getAAInfo());
12439 WorklistRemover DeadNodes(*
this);
12476 EVT VT =
N->getValueType(0);
12505 unsigned N00Bits =
N00.getScalarValueSizeInBits();
12519 unsigned N00Bits =
N00.getScalarValueSizeInBits();
12521 unsigned SrcElts =
N00.getValueType().getVectorMinNumElements();
12527 (!LegalOperations ||
12584 LN0->getMemOperand());
12587 AddToWorklist(
ExtLoad.getNode());
12601 LN0->getMemOperand());
12614 VT,
SDLoc(
N),
Ld->getChain(),
Ld->getBasePtr(),
Ld->getOffset(),
12615 Ld->getMask(),
Ld->getPassThru(),
ExtVT,
Ld->getMemOperand(),
12629 GN0->getBasePtr(),
GN0->getIndex(),
GN0->getScale()};
12637 AddToWorklist(
ExtLoad.getNode());
12654 EVT VT =
N->getValueType(0);
12671 EVT VT =
N->getValueType(0);
12686 if (
C.getNode() !=
N)
12768 if (
AmtVT !=
Amt.getValueType()) {
12770 AddToWorklist(
Amt.getNode());
12787 for (
const SDValue &Op : N0->op_values()) {
12814 "Invalid number of elements");
12846 if (
LN0->isSimple() &&
LN0->getMemoryVT().bitsLT(VT)) {
12848 VT,
LN0->getChain(),
LN0->getBasePtr(),
12849 LN0->getMemoryVT(),
12850 LN0->getMemOperand());
12863 unsigned NumDefs = 0;
12867 if (!
X.isUndef()) {
12878 X.getValueType().getVectorElementCount()));
12884 if (NumDefs == 1) {
12885 assert(V.getNode() &&
"The single defined operand is empty!");
12887 for (
unsigned i = 0, e = VTs.size(); i !=
e; ++i) {
12893 AddToWorklist(
NV.getNode());
12894 Opnds.push_back(NV);
12908 (!LegalOperations ||
12912 unsigned Idx = isLE ? 0 :
VecSrcVT.getVectorNumElements() - 1;
12946 if (
N00.getOperand(0)->getValueType(0).getVectorElementType() ==
12967 if (!LegalOperations && N0.
hasOneUse() &&
12985 if (!LegalOperations && N0.
hasOneUse() &&
13002 return Elt.getNode();
13003 return Elt.getOperand(
Elt.getResNo()).getNode();
13021 !
LD1->hasOneUse() || !
LD2->hasOneUse() ||
13022 LD1->getAddressSpace() !=
LD2->getAddressSpace())
13033 LD1->getPointerInfo(),
LD1->getAlign());
13048 EVT VT =
N->getValueType(0);
13086 LogicOp0.getOperand(0).getValueType() == VT) {
13099 EVT VT =
N->getValueType(0);
13124 if (!LegalOperations ||
13130 if (
C.getNode() !=
N)
13155 *
LN0->getMemOperand())) {
13158 LN0->getPointerInfo(),
LN0->getAlign(),
13159 LN0->getMemOperand()->getFlags(),
LN0->getAAInfo());
13185 AddToWorklist(
NewConv.getNode());
13195 AddToWorklist(
FlipBit.getNode());
13202 AddToWorklist(
Hi.getNode());
13204 AddToWorklist(
FlipBit.getNode());
13208 AddToWorklist(
FlipBits.getNode());
13238 AddToWorklist(
X.getNode());
13244 AddToWorklist(
X.getNode());
13250 X.getValueType(),
X,
13252 X.getValueType()));
13253 AddToWorklist(
X.getNode());
13255 AddToWorklist(
X.getNode());
13261 AddToWorklist(Cst.
getNode());
13263 AddToWorklist(
X.getNode());
13274 AddToWorklist(
FlipBit.getNode());
13277 AddToWorklist(
FlipBits.getNode());
13283 AddToWorklist(
X.getNode());
13288 AddToWorklist(Cst.
getNode());
13313 Op.getOperand(0).getValueType() == VT)
13332 for (
int M :
SVN->getMask())
13346 EVT VT =
N->getValueType(0);
13381 AddToWorklist(Ops.back().getNode());
13384 BV->getValueType(0).getVectorNumElements());
13425 for (
unsigned I = 0,
E =
RawBits.size();
I !=
E; ++
I) {
13442 N->getFlags().hasAllowContract();
13447 return Options.NoInfsFPMath ||
N.getNode()->getFlags().hasNoInfs();
13454 EVT VT =
N->getValueType(0);
13472 Options.UnsafeFPMath ||
N->getFlags().hasAllowReassociation();
13487 unsigned Opcode =
N.getOpcode();
13515 N1.getOperand(1), N0);
13528 N1.getOperand(2).getOpcode() ==
ISD::FMUL &&
N1.hasOneUse() &&
13529 N1.getOperand(2).hasOneUse()) {
13549 N00.getValueType())) {
13563 N10.getValueType())) {
13589 N020.getValueType())) {
13591 N020.getOperand(0),
N020.getOperand(1),
13617 N00.getValueType())) {
13619 N002.getOperand(0),
N002.getOperand(1),
13633 N120.getValueType())) {
13635 N120.getOperand(0),
N120.getOperand(1),
13652 N10.getValueType())) {
13654 N102.getOperand(0),
N102.getOperand(1),
13668 EVT VT =
N->getValueType(0);
13723 YZ.getOperand(1),
X);
13765 N00.getValueType())) {
13780 N10.getValueType())) {
13801 N00.getValueType())) {
13824 N000.getValueType())) {
13836 return Options.UnsafeFPMath ||
N->getFlags().hasAllowReassociation();
13845 unsigned Opcode =
N.getOpcode();
13851 bool CanFuse =
Options.UnsafeFPMath ||
N->getFlags().hasAllowContract();
13887 N020.getValueType())) {
13911 N00.getValueType())) {
13932 N120.getValueType())) {
13958 CvtSrc.getValueType())) {
13980SDValue DAGCombiner::visitFMULForFMADistributiveCombine(
SDNode *
N) {
13983 EVT VT =
N->getValueType(0);
14020 if (
C->isExactlyValue(+1.0))
14023 if (
C->isExactlyValue(-1.0))
14043 if (
C0->isExactlyValue(+1.0))
14047 if (
C0->isExactlyValue(-1.0))
14053 if (
C1->isExactlyValue(+1.0))
14056 if (
C1->isExactlyValue(-1.0))
14077 EVT VT =
N->getValueType(0);
14101 if (
N1C &&
N1C->isZero())
14102 if (
N1C->isNegative() ||
Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros())
14111 N1, DAG, LegalOperations, ForCodeSize))
14117 N0, DAG, LegalOperations, ForCodeSize))
14124 return C &&
C->isExactlyValue(-2.0);
14159 (Flags.hasAllowReassociation() && Flags.hasNoSignedZeros())) &&
14185 N1.getOperand(0) ==
N1.getOperand(1) &&
14227 if (!
CFP10 &&
N1.getOperand(0) ==
N1.getOperand(1) &&
14228 N1.getOperand(0) == N0) {
14237 N1.getOperand(0) ==
N1.getOperand(1) &&
14247 AddToWorklist(
Fused.getNode());
14257 EVT VT =
N->getValueType(0);
14265 N1, DAG, LegalOperations, ForCodeSize)) {
14267 {Chain, N0, NegN1});
14273 N0, DAG, LegalOperations, ForCodeSize)) {
14275 {Chain, N1, NegN0});
14285 EVT VT =
N->getValueType(0);
14308 if (!
N1CFP->isNegative() ||
Options.NoSignedZerosFPMath ||
14309 Flags.hasNoSignedZeros()) {
14316 if (
Options.NoNaNsFPMath || Flags.hasNoNaNs())
14322 if (
N0CFP->isNegative() ||
14323 (
Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros())) {
14340 (Flags.hasAllowReassociation() && Flags.hasNoSignedZeros())) &&
14343 if (N0 ==
N1->getOperand(0))
14346 if (N0 ==
N1->getOperand(1))
14357 AddToWorklist(
Fused.getNode());
14368 EVT VT =
N->getValueType(0);
14394 if (
Options.UnsafeFPMath || Flags.hasAllowReassociation()) {
14447 if (Flags.hasNoNaNs() && Flags.hasNoSignedZeros() &&
14493 AddToWorklist(
Fused.getNode());
14506 EVT VT =
N->getValueType(0);
14512 bool UnsafeFPMath =
14513 Options.UnsafeFPMath ||
N->getFlags().hasAllowReassociation();
14536 if (UnsafeFPMath) {
14553 if (UnsafeFPMath) {
14574 if (
N1CFP->isExactlyValue(1.0))
14577 if (
N1CFP->isExactlyValue(-1.0) &&
14580 AddToWorklist(
RHSNeg.getNode());
14594 if (UnsafeFPMath) {
14596 if (
N1CFP && N0 == N2) {
14614 SDValue(
N, 0), DAG, LegalOperations, ForCodeSize))
14632 if (LegalDAG || (!
UnsafeMath && !Flags.hasAllowReciprocal()))
14638 if (
N0CFP && (
N0CFP->isExactlyValue(1.0) ||
N0CFP->isExactlyValue(-1.0)))
14648 EVT VT =
N->getValueType(0);
14658 for (
auto *U :
N1->
uses()) {
14659 if (U->getOpcode() ==
ISD::FDIV && U->getOperand(1) ==
N1) {
14661 if (U->getOperand(1).getOpcode() ==
ISD::FSQRT &&
14662 U->getOperand(0) == U->getOperand(1).getOperand(0) &&
14663 U->getFlags().hasAllowReassociation() &&
14664 U->getFlags().hasNoSignedZeros())
14669 if (
UnsafeMath || U->getFlags().hasAllowReciprocal())
14684 for (
auto *U :
Users) {
14686 if (Dividend !=
FPOne) {
14689 CombineTo(U, NewNode);
14702 EVT VT =
N->getValueType(0);
14726 if (
Options.UnsafeFPMath || Flags.hasAllowReciprocal()) {
14736 (!LegalOperations ||
14756 AddToWorklist(
RV.getNode());
14764 AddToWorklist(
RV.getNode());
14772 Sqrt =
N1.getOperand(0);
14773 Y =
N1.getOperand(1);
14774 }
else if (
N1.getOperand(1).getOpcode() ==
ISD::FSQRT) {
14775 Sqrt =
N1.getOperand(1);
14776 Y =
N1.getOperand(0);
14778 if (
Sqrt.getNode()) {
14781 if (Flags.hasAllowReassociation() &&
N1.hasOneUse() &&
14782 N1->getFlags().hasAllowReassociation() &&
Sqrt.hasOneUse()) {
14785 A =
Y.getOperand(0);
14786 else if (
Y ==
Sqrt.getOperand(0))
14798 recursivelyDeleteUnusedNodes(
AAZ.getNode());
14806 AddToWorklist(Div.
getNode());
14813 if (
Options.NoInfsFPMath || Flags.hasNoInfs())
14819 if ((
Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros()) &&
14820 (
Options.UnsafeFPMath || Flags.hasAllowReassociation()))
14844 EVT VT =
N->getValueType(0);
14867 if (!Flags.hasApproximateFuncs() ||
14868 (!
Options.NoInfsFPMath && !Flags.hasNoInfs()))
14915 EVT VT =
N->getValueType(0);
14926 if (!V.isNegative()) {
14969 EVT VT =
N->getValueType(0);
14979 if (!Flags.hasNoSignedZeros() || !Flags.hasNoInfs() || !Flags.hasNoNaNs() ||
14980 !Flags.hasApproximateFuncs())
15010 if ((!Flags.hasNoSignedZeros() &&
ExponentIs025) || !Flags.hasNoInfs() ||
15011 !Flags.hasApproximateFuncs())
15044 EVT VT =
N->getValueType(0);
15065 EVT VT =
N->getValueType(0);
15075 (!LegalOperations ||
15117 EVT VT =
N->getValueType(0);
15127 (!LegalOperations ||
15157 EVT VT =
N->getValueType(0);
15198 EVT VT =
N->getValueType(0);
15213 EVT VT =
N->getValueType(0);
15230 EVT VT =
N->getValueType(0);
15242 const bool NIsTrunc =
N->getConstantOperandVal(1) == 1;
15271 AddToWorklist(Tmp.
getNode());
15284 EVT VT =
N->getValueType(0);
15287 if (
N->hasOneUse() &&
15305 if (
In.getValueType() == VT)
return In;
15319 LN0->getMemOperand());
15337 EVT VT =
N->getValueType(0);
15348 EVT VT =
N->getValueType(0);
15372 EVT VT =
N->getValueType(0);
15383 EVT VT =
N->getValueType(0);
15400 N->getFlags().hasNoSignedZeros()) && N0.
hasOneUse()) {
15414 EVT VT =
N->getValueType(0);
15416 unsigned Opc =
N->getOpcode();
15442 if (
AF.isInfinity() || (Flags.hasNoInfs() &&
AF.isLargest())) {
15448 return N->getOperand(1);
15455 return N->getOperand(0);
15464 EVT VT =
N->getValueType(0);
15494 N1->getOperand(0), N2);
15507 N1.getOperand(0).getValueType())) {
15509 Chain,
N1.getOperand(2),
15510 N1.getOperand(0),
N1.getOperand(1), N2);
15513 if (
N1.hasOneUse()) {
15528 (
N.getOperand(0).hasOneUse() &&
15529 N.getOperand(0).getOpcode() ==
ISD::SRL))) {
15600 bool Equal =
false;
15638 if (
Simp.getNode()) AddToWorklist(
Simp.getNode());
15643 N->getOperand(0),
Simp.getOperand(2),
15644 Simp.getOperand(0),
Simp.getOperand(1),
15654 if (LD->isIndexed())
15656 EVT VT = LD->getMemoryVT();
15659 Ptr = LD->getBasePtr();
15661 if (ST->isIndexed())
15663 EVT VT = ST->getMemoryVT();
15666 Ptr = ST->getBasePtr();
15669 if (LD->isIndexed())
15671 EVT VT = LD->getMemoryVT();
15675 Ptr = LD->getBasePtr();
15678 if (ST->isIndexed())
15680 EVT VT = ST->getMemoryVT();
15684 Ptr = ST->getBasePtr();
15698bool DAGCombiner::CombineToPreIndexedLoadStore(
SDNode *
N) {
15702 bool IsLoad =
true;
15754 if (Val == BasePtr)
15765 Worklist.push_back(
N);
15773 UE =
BasePtr.getNode()->use_end();
15844 Result.getNode()->dump(&DAG);
dbgs() <<
'\n');
15845 WorklistRemover DeadNodes(*
this);
15860 for (
unsigned i = 0, e =
OtherUses.size(); i !=
e; ++i) {
15865 BasePtr.getNode() &&
"Expected BasePtr operand");
15909 AddToWorklist(
Result.getNode());
15934 for (
SDNode *
Use : BasePtr.getNode()->uses()) {
15940 bool IsLoad =
true;
15946 Worklist.push_back(
Use);
15989 Visited.insert(Ptr.
getNode());
15990 Worklist.push_back(
N);
15991 Worklist.push_back(Op);
16003bool DAGCombiner::CombineToPostIndexedLoadStore(
SDNode *
N) {
16007 bool IsLoad =
true;
16032 dbgs() <<
"\nWith: ";
Result.getNode()->dump(&DAG);
16034 WorklistRemover DeadNodes(*
this);
16047 Result.getValue(IsLoad ? 1 : 0));
16064 "Cannot split out indexing using opaque target constants");
16081 Val =
ST->getValue();
16109 "Attempting to extend value of non-matching type");
16113 switch (
LD->getExtensionType()) {
16137 if (!ST || !
ST->isSimple())
16175 (int64_t)
LDMemType.getStoreSizeInBits().getFixedSize()) /
16191 if (
LD->isIndexed()) {
16198 return CombineTo(LD, Ops, 3);
16200 return CombineTo(LD, Val, Chain);
16225 if (
LD->getBasePtr().isUndef() ||
Offset != 0)
16263 if (
LD->isSimple()) {
16266 if (!
N->hasAnyUseOfValue(0)) {
16274 dbgs() <<
"\nWith chain: "; Chain.getNode()->dump(&DAG);
16276 WorklistRemover DeadNodes(*
this);
16279 if (
N->use_empty())
16294 if (!
N->hasAnyUseOfValue(0) && (
CanSplitIdx || !
N->hasAnyUseOfValue(1))) {
16303 Index = DAG.
getUNDEF(
N->getValueType(1));
16305 dbgs() <<
"\nWith: ";
Undef.getNode()->dump(&DAG);
16306 dbgs() <<
" and 2 other values\n");
16307 WorklistRemover DeadNodes(*
this);
16325 if (*Alignment >
LD->getAlign() &&
16326 isAligned(*Alignment,
LD->getSrcValueOffset())) {
16328 LD->getExtensionType(),
SDLoc(
N),
LD->getValueType(0), Chain, Ptr,
16329 LD->getPointerInfo(),
LD->getMemoryVT(), *Alignment,
16330 LD->getMemOperand()->getFlags(),
LD->getAAInfo());
16338 if (
LD->isUnindexed()) {
16352 LD->getValueType(0),
16354 LD->getMemOperand());
16362 return CombineTo(
N,
ReplLoad.getValue(0), Token);
16392struct LoadedSlice {
16396 bool ForCodeSize =
false;
16399 unsigned Loads = 0;
16400 unsigned Truncates = 0;
16401 unsigned CrossRegisterBanksCopies = 0;
16402 unsigned ZExts = 0;
16403 unsigned Shift = 0;
16405 explicit Cost(
bool ForCodeSize) : ForCodeSize(ForCodeSize) {}
16408 Cost(
const LoadedSlice &LS,
bool ForCodeSize)
16409 : ForCodeSize(ForCodeSize), Loads(1) {
16425 LS.Inst->getValueType(0)))
16431 if (
LS.canMergeExpensiveCrossRegisterBankCopy())
16432 ++CrossRegisterBanksCopies;
16436 Loads += RHS.Loads;
16437 Truncates += RHS.Truncates;
16438 CrossRegisterBanksCopies += RHS.CrossRegisterBanksCopies;
16439 ZExts += RHS.ZExts;
16440 Shift += RHS.Shift;
16445 return Loads == RHS.Loads && Truncates == RHS.Truncates &&
16446 CrossRegisterBanksCopies == RHS.CrossRegisterBanksCopies &&
16447 ZExts == RHS.ZExts && Shift == RHS.Shift;
16450 bool operator!=(
const Cost &RHS)
const {
return !(*
this ==
RHS); }
16452 bool operator<(
const Cost &RHS)
const {
16465 bool operator>(
const Cost &RHS)
const {
return RHS < *
this; }
16467 bool operator<=(
const Cost &RHS)
const {
return !(RHS < *
this); }
16469 bool operator>=(
const Cost &RHS)
const {
return !(*
this <
RHS); }
16488 : Inst(Inst), Origin(Origin), Shift(Shift), DAG(DAG) {}
16498 assert(Origin &&
"No original load to compare against.");
16500 assert(Inst &&
"This slice is not bound to an instruction");
16502 "Extracted slice is bigger than the whole type!");
16512 unsigned SliceSize =
getUsedBits().countPopulation();
16513 assert(!(SliceSize & 0x7) &&
"Size is not a multiple of a byte.");
16514 return SliceSize / 8;
16520 assert(DAG &&
"Missing context");
16535 bool isLegal()
const {
16537 if (!Origin || !Inst || !DAG)
16582 assert(DAG &&
"Missing context.");
16584 assert(!(Shift & 0x7) &&
"Shifts not aligned on Bytes are not supported.");
16588 "The size of the original loaded type is not a multiple of a"
16593 "Invalid shift amount for given loaded size");
16606 assert(Inst && Origin &&
"Unable to replace a non-existing slice.");
16611 assert(
Offset >= 0 &&
"Offset too big to fit in int64_t!");
16647 assert(DAG &&
"Missing context");
16654 Use->getOperand(0)->isDivergent());
16709 const LoadedSlice &
Second) {
16710 assert(First.Origin ==
Second.Origin && First.Origin &&
16711 "Unable to match different memory origins.");
16714 "Slices are not supposed to overlap.");
16733 assert(LHS.Origin == RHS.Origin &&
"Different bases not implemented.");
16734 return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
16739 const LoadedSlice *First =
nullptr;
16740 const LoadedSlice *
Second =
nullptr;
16810 LoadedSlice::Cost
SliceCost(LS, ForCodeSize);
16829bool DAGCombiner::SliceUpLoad(
SDNode *
N) {
16835 !
LD->getValueType(0).isInteger())
16841 if (
LD->getValueType(0).isScalableVector())
16854 UI !=
UIEnd; ++UI) {
16856 if (UI.getUse().getResNo() != 0)
16860 unsigned Shift = 0;
16865 Shift =
User->getConstantOperandVal(1);
16878 unsigned Width =
User->getValueSizeInBits(0);
16883 LoadedSlice
LS(
User, LD, Shift, &DAG);
16917 "It takes more than a zext to get to the loaded slice!!");
16924 AddToWorklist(Chain.getNode());
16931static std::pair<unsigned, unsigned>
16933 std::pair<unsigned, unsigned> Result(0, 0);
16943 if (LD->getBasePtr() != Ptr)
return Result;
16946 if (V.getValueType() !=
MVT::i16 &&
16967 NotMaskLZ -= 64-V.getValueSizeInBits();
16974 default:
return Result;
16983 if (LD == Chain.getNode())
16988 if (!LD->isOperandOf(Chain.getNode()))
17005 unsigned NumBytes = MaskInfo.first;
17020 if (!
DC->isTypeLegal(VT))
17022 if (
St->getMemOperand() &&
17024 *
St->getMemOperand()))
17055 St->getPointerInfo().getWithOffset(
StOffset),
17056 St->getOriginalAlign());
17065 if (!
ST->isSimple())
17076 unsigned Opc =
Value.getOpcode();
17088 Value.getOperand(1), ST,
this))
17095 Value.getOperand(0), ST,
this))
17110 if (
LD->getBasePtr() != Ptr ||
17111 LD->getPointerInfo().getAddrSpace() !=
17112 ST->getPointerInfo().getAddrSpace())
17117 unsigned BitWidth =
N1.getValueSizeInBits();
17145 if ((Imm & Mask) == Imm) {
17159 LD->getMemOperand()->getFlags(), &
IsFast) ||
17168 LD->getMemOperand()->getFlags(),
LD->getAAInfo());
17176 AddToWorklist(
NewPtr.getNode());
17177 AddToWorklist(
NewLD.getNode());
17178 AddToWorklist(
NewVal.getNode());
17179 WorklistRemover DeadNodes(*
this);
17198 EVT VT =
LD->getMemoryVT();
17200 VT !=
ST->getMemoryVT() ||
17201 LD->isNonTemporal() ||
17202 ST->isNonTemporal() ||
17203 LD->getPointerInfo().getAddrSpace() != 0 ||
17204 ST->getPointerInfo().getAddrSpace() != 0)
17211 if (
VTSize.isScalable())
17229 LD->getPointerInfo(),
LD->getAlign());
17233 ST->getPointerInfo(),
ST->getAlign());
17235 AddToWorklist(
NewLD.getNode());
17236 AddToWorklist(
NewST.getNode());
17237 WorklistRemover DeadNodes(*
this);
17259bool DAGCombiner::isMulAddWithConstProfitable(
SDNode *
MulNode,
17266 if (
AddNode.getNode()->hasOneUse() &&
17281 OtherOp =
Use->getOperand(1).getNode();
17283 OtherOp =
Use->getOperand(0).getNode();
17323 unsigned NumStores) {
17328 for (
unsigned i = 0; i < NumStores; ++i) {
17333 for (
unsigned i = 0; i < NumStores; ++i) {
17334 if (Visited.insert(
StoreNodes[i].MemNode->getChain().getNode()).second)
17335 Chains.push_back(
StoreNodes[i].MemNode->getChain());
17338 assert(Chains.size() > 0 &&
"Chain should have generated a chain");
17342bool DAGCombiner::mergeStoresOfConstantsOrVecElts(
17350 "This optimization cannot emit a vector truncating store");
17361 for (
unsigned I = 0;
I != NumStores; ++
I) {
17364 Flags =
St->getMemOperand()->getFlags();
17365 AAInfo =
St->getAAInfo();
17369 if (Flags !=
St->getMemOperand()->getFlags())
17372 AAInfo = AAInfo.
concat(
St->getAAInfo());
17387 for (
unsigned I = 0;
I != NumStores; ++
I) {
17416 for (
unsigned i = 0; i < NumStores; ++i) {
17440 Ops.push_back(Val);
17458 for (
unsigned i = 0; i < NumStores; ++i) {
17459 unsigned Idx =
IsLE ? (NumStores - 1 - i) : i;
17468 .zextOrTrunc(SizeInBits);
17473 .zextOrTrunc(SizeInBits);
17510 for (
unsigned i = 0; i < NumStores; ++i)
17513 AddToWorklist(
NewChain.getNode());
17517void DAGCombiner::getStoreMergeCandidates(
17524 if (!
BasePtr.getBase().getNode() ||
BasePtr.getBase().isUndef())
17529 assert(
StoreSrc != StoreSource::Unknown &&
"Expected known source for store");
17535 if (
StoreSrc == StoreSource::Load) {
17543 if (!
Ld->hasNUsesOfValue(1, 0))
17547 if (!
Ld->isSimple() ||
Ld->isIndexed())
17551 int64_t &
Offset) ->
bool {
17554 if (!
Other->isSimple() ||
Other->isIndexed())
17557 if (
St->isNonTemporal() !=
Other->isNonTemporal())
17564 case StoreSource::Load: {
17575 if (!
OtherLd->hasNUsesOfValue(1, 0))
17588 case StoreSource::Constant:
17594 case StoreSource::Extract:
17596 if (
Other->isTruncatingStore())
17614 SDNode *RootNode) ->
bool {
17616 return RootCount != StoreRootCountMap.end() &&
17623 if (
UseIter.getOperandNo() != 0)
17650 RootNode =
St->getChain().getNode();
17655 RootNode =
Ldn->getChain().getNode();
17659 for (
auto I2 = (*I)->use_begin(),
E2 = (*I)->use_end();
I2 !=
E2; ++
I2)
17679bool DAGCombiner::checkMergeStoreCandidatesForDependencies(
17694 Worklist.push_back(RootNode);
17695 while (!Worklist.empty()) {
17696 auto N = Worklist.pop_back_val();
17697 if (!Visited.insert(
N).second)
17701 Worklist.push_back(
Op.getNode());
17706 unsigned int Max = 1024 + Visited.size();
17708 for (
unsigned i = 0; i < NumStores; ++i) {
17721 for (
unsigned j = 1;
j <
N->getNumOperands(); ++
j)
17722 Worklist.push_back(
N->getOperand(
j).getNode());
17725 for (
unsigned i = 0; i < NumStores; ++i)
17731 if (Visited.size() >= Max) {
17748 size_t StartIdx = 0;
17749 while ((StartIdx + 1 <
StoreNodes.size()) &&
17765 int64_t StartAddress =
StoreNodes[0].OffsetFromBase;
17768 for (
unsigned i = 1, e =
StoreNodes.size(); i <
e; ++i) {
17783bool DAGCombiner::tryStoreMergeOfConstants(
17790 bool MadeChange =
false;
17800 bool NonZero =
false;
17822 if (
StoreTy.getSizeInBits() > MaximumLegalStoreInBits)
17911bool DAGCombiner::tryStoreMergeOfExtracts(
17917 bool MadeChange =
false;
17932 if (Ty.getSizeInBits() > MaximumLegalStoreInBits)
17990 bool MadeChange =
false;
18022 bool NeedRotate =
false;
18057 bool isDereferenceable =
true;
18059 int64_t StartAddress =
LoadNodes[0].OffsetFromBase;
18061 for (
unsigned i = 1; i <
LoadNodes.size(); ++i) {
18063 if (
LoadNodes[i].MemNode->getChain() != LoadChain)
18071 if (isDereferenceable && !
LoadNodes[i].MemNode->isDereferenceable())
18072 isDereferenceable =
false;
18079 if (
StoreTy.getSizeInBits() > MaximumLegalStoreInBits)
18217 "Unexpected type for rotate-able load pair");
18240 for (
unsigned i = 0; i <
NumElem; ++i) {
18248 for (
unsigned i = 0; i <
NumElem; ++i) {
18252 recursivelyDeleteUnusedNodes(Val.
getNode());
18272 if (
MemVT.isScalableVector())
18274 if (!
MemVT.isSimple() ||
MemVT.getSizeInBits() * 2 > MaximumLegalStoreInBits)
18286 if (
StoreSrc == StoreSource::Unknown)
18301 return LHS.OffsetFromBase < RHS.OffsetFromBase;
18305 Attribute::NoImplicitFloat);
18317 bool MadeChange =
false;
18328 case StoreSource::Constant:
18333 case StoreSource::Extract:
18338 case StoreSource::Load:
18356 if (
ST->isTruncatingStore()) {
18358 ST->getBasePtr(),
ST->getMemoryVT(),
18359 ST->getMemOperand());
18362 ST->getMemOperand());
18370 AddToWorklist(Token.
getNode());
18373 return CombineTo(ST, Token,
false);
18410 bitcastToAPInt().getZExtValue(),
SDLoc(CFP),
18412 return DAG.
getStore(Chain,
DL, Tmp, Ptr,
ST->getMemOperand());
18424 Ptr,
ST->getMemOperand());
18427 if (
ST->isSimple() &&
18442 ST->getOriginalAlign(), MMOFlags, AAInfo);
18445 ST->getPointerInfo().getWithOffset(4),
18446 ST->getOriginalAlign(), MMOFlags, AAInfo);
18464 ST->isUnindexed()) {
18472 if (((!LegalOperations &&
ST->isSimple()) ||
18475 DAG, *
ST->getMemOperand())) {
18477 ST->getMemOperand());
18482 if (
Value.isUndef() &&
ST->isUnindexed())
18488 if (*Alignment >
ST->getAlign() &&
18489 isAligned(*Alignment,
ST->getSrcValueOffset())) {
18492 ST->getMemoryVT(), *Alignment,
18493 ST->getMemOperand()->getFlags(),
ST->getAAInfo());
18510 if (
ST->isUnindexed()) {
18518 Chain =
ST->getChain();
18522 if (
ST->isTruncatingStore() &&
ST->isUnindexed() &&
18523 Value.getValueType().isInteger() &&
18530 Value.getOperand(0).getValueType() ==
ST->getMemoryVT() &&
18533 ST->getMemOperand());
18537 ST->getMemoryVT().getScalarSizeInBits());
18542 AddToWorklist(
Value.getNode());
18545 ST->getMemOperand());
18564 if (
Ld->getBasePtr() == Ptr &&
ST->getMemoryVT() ==
Ld->getMemoryVT() &&
18565 ST->isUnindexed() &&
ST->isSimple() &&
18566 Ld->getAddressSpace() ==
ST->getAddressSpace() &&
18569 Chain.reachesChainWithoutSideEffects(
SDValue(
Ld, 1))) {
18577 if (
ST->isUnindexed() &&
ST->isSimple() &&
18578 ST1->isUnindexed() &&
ST1->isSimple()) {
18580 ST1->getValue() ==
Value &&
ST->getMemoryVT() ==
ST1->getMemoryVT() &&
18581 ST->getAddressSpace() ==
ST1->getAddressSpace()) {
18588 !
ST1->getBasePtr().isUndef() &&
18591 !
ST->getMemoryVT().isScalableVector() &&
18592 !
ST1->getMemoryVT().isScalableVector() &&
18593 ST->getAddressSpace() ==
ST1->getAddressSpace()) {
18596 unsigned STBitSize =
ST->getMemoryVT().getFixedSizeInBits();
18603 CombineTo(
ST1,
ST1->getChain());
18616 ST->getMemoryVT(), LegalOperations)) {
18618 Ptr,
ST->getMemoryVT(),
ST->getMemOperand());
18660 if (!LifetimeEnd->hasOffset())
18664 LifetimeEnd->getOffset(),
false);
18668 while (!Chains.empty()) {
18669 SDValue Chain = Chains.pop_back_val();
18670 if (!Chain.hasOneUse())
18672 switch (Chain.getOpcode()) {
18674 for (
unsigned Nops = Chain.getNumOperands();
Nops;)
18675 Chains.push_back(Chain.getOperand(--
Nops));
18682 Chains.push_back(Chain.getOperand(0));
18687 if (!
ST->isSimple() ||
ST->isIndexed())
18689 const TypeSize StoreSize =
ST->getMemoryVT().getStoreSize();
18700 dbgs() <<
"\nwithin LIFETIME_END of : ";
18702 CombineTo(ST,
ST->getChain());
18745 if (!
ST->isSimple())
18778 !
Lo.getOperand(0).getValueType().isScalarInteger() ||
18781 !
Hi.getOperand(0).getValueType().isScalarInteger() ||
18788 ?
Lo.getOperand(0).getValueType()
18789 :
Lo.getValueType();
18791 ?
Hi.getOperand(0).getValueType()
18792 :
Hi.getValueType();
18809 ST->getOriginalAlign(), MMOFlags, AAInfo);
18814 ST->getOriginalAlign(), MMOFlags, AAInfo);
18821 "Expected extract_vector_elt");
18865 int Step =
ArgVal.getOperand(0).getValueType().getVectorNumElements();
18899 !
InsertVal.getOperand(0).getValueType().isVector())
18945 AddToWorklist(
Shuf.getNode());
19007 AddToWorklist(
NewOp.getNode());
19024 Ops.append(
InVec.getNode()->op_begin(),
19025 InVec.getNode()->op_end());
19026 }
else if (
InVec.isUndef()) {
19034 if (
Elt < Ops.size()) {
19037 EVT OpVT = Ops[0].getValueType();
19110 Chain =
Load.getValue(1);
19115 Chain =
Load.getValue(1);
19121 WorklistRemover DeadNodes(*
this);
19126 AddToWorklist(
EVE);
19137 bool LegalOperations) {
19174 if (
VecOp.isUndef())
19183 Index ==
VecOp.getOperand(2)) {
19218 "BUILD_VECTOR used for scalable vectors");
19245 VecOp.hasOneUse()) {
19255 if (LegalTypes &&
BCSrc.getValueType().isInteger() &&
19260 assert(
X.getValueType().isScalarInteger() &&
ScalarVT.isScalarInteger() &&
19261 "Extract element and scalar to vector can't change element type "
19262 "from FP to integer.");
19263 unsigned XBitWidth =
X.getValueSizeInBits();
19271 "Scalar bitwidth must be a multiple of vector element bitwidth");
19317 if (!LegalOperations ||
19329 return Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
19330 Use->getOperand(0) == VecOp &&
19331 isa<ConstantSDNode>(Use->getOperand(1));
19367 if (!
VecOp.hasOneUse())
19371 if (!
BCVT.isVector() ||
ExtVT.bitsGT(
BCVT.getVectorElementType()))
19380 if (!LegalOperations && !
IndexC &&
VecOp.hasOneUse() &&
19382 !Index->hasPredecessor(
VecOp.getNode())) {
19384 if (VecLoad &&
VecLoad->isSimple())
19390 if (!LegalOperations || !
IndexC)
19401 VecOp.getOperand(0).getValueType() ==
ExtVT &&
19404 if (!
VecOp.hasOneUse())
19415 if (!
VecOp.hasOneUse())
19429 if (!
VecOp.hasOneUse())
19443 VecOp.getOperand(0).getValueType().getVectorElementType()))) {
19456 Index.getValueType());
19467 if (!
LN0 || !
LN0->hasNUsesOfValue(1,0) || !
LN0->isSimple())
19490 EVT VT =
N->getValueType(0);
19503 if (
In.isUndef())
continue;
19515 EVT InTy =
In.getOperand(0).getValueType();
19559 for (
unsigned i = 0, e =
N->getNumOperands(); i !=
e; ++i) {
19563 Cast.isUndef()) &&
"Invalid cast opcode");
19565 if (
Cast.isUndef())
19568 In =
Cast->getOperand(0);
19569 unsigned Index = isLE ? (i *
ElemRatio) :
19572 assert(Index < Ops.size() &&
"Invalid index");
19579 "Invalid vector size");
19590 AddToWorklist(
BV.getNode());
19607 EVT VT =
N->getValueType(0);
19620 return Op.getOperand(0);
19629 if (
In.isUndef())
continue;
19649 }
else if (Src != part) {
19658 uint64_t ShiftAmt =
In.getNode()->getConstantOperandVal(1);
19667 if (Src.getValueType().getSizeInBits() != VT.
getSizeInBits())
19679 EVT VT =
N->getValueType(0);
19695 "Inputs must be sorted to be in non-increasing vector size order.");
19714 if (!
VecIn2.getNode()) {
19725 "Second input is not going to be larger than the first one.");
19731 if (LegalOperations &&
19765 for (
unsigned i = 0; i !=
NumElems; ++i) {
19769 unsigned ExtIndex =
N->getOperand(i).getConstantOperandVal(1);
19799 for (
int i = 0; i !=
NumBVOps; ++i) {
19818 EVT VT =
BV->getValueType(0);
19870template <
typename R,
typename T>
19872 auto I =
find(Range, Val);
19873 if (
I == Range.end())
19874 return static_cast<decltype(std::distance(Range.begin(),
I))
>(-1);
19875 return std::distance(Range.begin(),
I);
19883 EVT VT =
N->getValueType(0);
19897 unsigned NumElems =
N->getNumOperands();
19909 for (
unsigned i = 0; i !=
NumElems; ++i) {
19956 if (
VecIn.size() < 2)
19964 if (
VecIn.size() == 2) {
19965 unsigned MaxIndex = 0;
19971 for (
unsigned i = 0; i <
NumElems; i++) {
19974 unsigned Index =
N->getOperand(i).getConstantOperandVal(1);
19976 MaxIndex = std::max(MaxIndex, Index);
19987 InVT.getVectorNumElements()) {
19997 for (
unsigned i = 0; i <
NumElems; i++) {
20045 for (
unsigned In = 0, Len = (
VecIn.size() / 2);
In < Len; ++
In) {
20053 Shuffles.push_back(Shuffle);
20062 : DAG.getConstantFP(0.0,
DL, VT));
20065 if (Shuffles.size() == 1)
20066 return Shuffles[0];
20071 Vec = Shuffles.size() - 1;
20073 Vec = (Vec - 1) / 2;
20087 if (Shuffles.size() % 2)
20088 Shuffles.push_back(DAG.
getUNDEF(VT));
20095 for (
unsigned In = 0, Len =
CurSize / 2;
In < Len; ++
In) {
20099 for (
unsigned i = 0; i !=
NumElems; ++i) {
20113 return Shuffles[0];
20121 if (LegalOperations)
20124 EVT VT =
N->getValueType(0);
20129 unsigned Opc =
Op.getOpcode();
20135 return C->getZExtValue();
20147 unsigned NumElems =
N->getNumOperands();
20149 EVT InSVT =
In.getValueType().getScalarType();
20157 for (
unsigned i = 1; i !=
NumElems; ++i) {
20170 EVT VT =
N->getValueType(0);
20184 if (!LegalOperations) {
20188 if (
SrcVT.isVector()) {
20189 unsigned NumElts =
N->getNumOperands() *
SrcVT.getVectorNumElements();
20203 if (!LegalTypes && (
N->getNumOperands() > 1)) {
20209 return CNode->getZExtValue();
20214 for (
unsigned i = 0; i <
N->getNumOperands(); ++i) {
20225 ((
Offset %
N->getValueType(0).getVectorNumElements()) ==
20247 assert(!V.isUndef() &&
"Splat of undef should have been handled earlier");
20256 EVT OpVT =
N->getOperand(0).getValueType();
20263 EVT VT =
N->getValueType(0);
20271 bool AnyFP =
false;
20272 for (
const SDValue &Op :
N->ops()) {
20274 !Op.getOperand(0).getValueType().isVector())
20275 Ops.push_back(Op.getOperand(0));
20300 if (Op.getValueType() ==
SVT)
20320 EVT VT =
N->getValueType(0);
20325 for (
const SDValue &Op :
N->ops()) {
20331 SubVT = Op.getOperand(0).getValueType();
20337 if (
SubVT != Op.getOperand(0).getValueType())
20343 for (
const SDValue &Op :
N->ops()) {
20344 if (Op.isUndef()) {
20348 ConcatOps.append(Op->op_begin(), Op->op_end());
20358 EVT VT =
N->getValueType(0);
20359 EVT OpVT =
N->getOperand(0).getValueType();
20375 if (Op.isUndef()) {
20385 int ExtIdx = Op.getConstantOperandVal(1);
20416 Mask.push_back(i +
ExtIdx);
20432 unsigned CastOpcode =
N->getOperand(0).getOpcode();
20449 EVT SrcVT =
N->getOperand(0).getOperand(0).getValueType();
20450 if (!
SrcVT.isVector())
20457 if (Op.getOpcode() !=
CastOpcode || !Op.hasOneUse() ||
20458 Op.getOperand(0).getValueType() !=
SrcVT)
20460 SrcOps.push_back(Op.getOperand(0));
20466 EVT VT =
N->getValueType(0);
20496 if (
N->getNumOperands() == 1)
20500 EVT VT =
N->getValueType(0);
20506 [](
const SDValue &Op) { return Op.isUndef(); })) {
20508 assert(
In.getValueType().isVector() &&
"Must concat vectors");
20513 unsigned NumOps =
N->getNumOperands() *
In.getNumOperands();
20525 EVT SVT =
Scalar.getValueType().getVectorElementType();
20526 if (
SVT ==
Scalar.getOperand(0).getValueType())
20531 if (!
Scalar.getValueType().isVector()) {
20542 if (!
SclTy.isFloatingPoint() && !
SclTy.isInteger())
20574 if (!
SVT.isFloatingPoint()) {
20580 EVT OpSVT =
Op.getOperand(0).getValueType();
20595 if (
SVT.isFloatingPoint()) {
20596 assert(
SVT ==
OpVT.getScalarType() &&
"Concat vector type mismatch");
20599 for (
unsigned i = 0; i !=
NumElts; ++i)
20607 "Concat vector type mismatch");
20637 N->getOperand(0).getValueType().getVectorMinNumElements();
20639 for (
unsigned i = 0, e =
N->getNumOperands(); i !=
e; ++i) {
20679 V.getOperand(1).getValueType() ==
SubVT && V.getOperand(2) == Index) {
20680 return V.getOperand(1);
20684 V.getOperand(0).getValueType() ==
SubVT &&
20685 (
IndexC->getZExtValue() %
SubVT.getVectorMinNumElements()) == 0) {
20687 return V.getOperand(SubIdx);
20694 bool LegalOperations) {
20703 if (VecVT !=
Bop0.getValueType() || VecVT !=
Bop1.getValueType())
20730 bool LegalOperations) {
20755 if (
C &&
C->getValueAPF().isNegZero())
20764 if (!
WideBVT.isFixedLengthVector())
20770 "Extract index is not a multiple of the vector length.");
20866 if (!
Ld ||
Ld->getExtensionType() || !
Ld->isSimple())
20881 assert(Index %
NumElts == 0 &&
"The extract subvector index is not a "
20882 "multiple of the result's element count");
20901 if (
Offset.isScalable()) {
20924 bool LegalOperations) {
20926 "Must only be called on EXTRACT_SUBVECTOR's");
20933 if (!
NarrowVT.isFixedLengthVector() || !
WideVT.isFixedLengthVector())
20946 if (LegalOperations &&
20953 "Extract index is not a multiple of the output vector length.");
20966 "Out-of-bounds shuffle mask?");
20980 "Shuffle mask vector decomposition failure.");
20988 "Shuffle mask subvector decomposition failure.");
20992 "Shuffle mask full decomposition failure.");
20996 if (Op.isUndef()) {
21029 "Should have ended up demanding at most two subvectors.");
21046 for (
const std::pair<SDValue /*Op*/, int /*SubvectorIndex*/>
21055 "Should end up with either one or two ops");
21065 EVT NVT =
N->getValueType(0);
21081 V.getConstantOperandVal(1)) &&
21091 V.getOperand(0).getValueType().isVector() &&
21096 unsigned DestNumElts = V.getValueType().getVectorMinNumElements();
21145 "Concat and extract subvector do not change element type");
21147 "Extract index is not a multiple of the input vector length.");
21155 if (
NVT.getVectorElementCount() ==
ConcatSrcVT.getVectorElementCount())
21163 if (
NVT.isFixedLengthVector() &&
ConcatSrcVT.isFixedLengthVector() &&
21168 "Trying to extract from >1 concat operand?");
21170 "Extract index is not a multiple of the input vector length.");
21187 unsigned EltSize =
InVT.getScalarSizeInBits();
21203 if (
EltVT != Src.getValueType())
21219 EVT SmallVT = V.getOperand(1).getValueType();
21238 DAG.
getBitcast(
N->getOperand(0).getValueType(), V.getOperand(0)),
21266 EVT VT =
Shuf->getValueType(0);
21271 for (
unsigned i = 0; i !=
NumElts; ++i) {
21304 EVT VT =
N->getValueType(0);
21351 if (0 <= OpIdx &&
EltOpIdx != OpIdx)
21355 assert(0 <= OpIdx &&
"Unknown concat_vectors op");
21385 EVT VT =
SVN->getValueType(0);
21395 if (!
N1.isUndef()) {
21396 if (!
N1->hasOneUse())
21409 bool IsSplat =
false;
21414 IsSplat = (
Splat0 ==
BV1->getSplatValue());
21418 for (
int M :
SVN->getMask()) {
21448 if (
SVT.isInteger())
21450 SVT = (
SVT.bitsLT(Op.getValueType()) ? Op.getValueType() :
SVT);
21466 bool LegalOperations) {
21467 EVT VT =
SVN->getValueType(0);
21481 for (
unsigned i = 0; i !=
NumElts; ++i) {
21484 if ((i % Scale) == 0 && Mask[i] == (int)(i / Scale))
21493 for (
unsigned Scale = 2; Scale <
NumElts; Scale *= 2) {
21505 if (!LegalOperations ||
21522 EVT VT =
SVN->getValueType(0);
21552 for (
unsigned i = 0; i !=
NumElts; ++i) {
21555 if ((i * Scale) <
NumElts && Mask[i] == (int)(i * Scale))
21583 if (!
Shuf->getOperand(1).isUndef())
21586 if (!Splat || !Splat->isSplat())
21611 for (
unsigned i = 0, e =
UserMask.size(); i != e; ++i)
21618 return Shuf->getOperand(0);
21627 Splat->getOperand(0), Splat->getOperand(1),
21635 if (!
OuterShuf->getOperand(1).isUndef())
21646 int SplatIndex = -1;
21647 for (
unsigned i = 0; i !=
NumElts; ++i) {
21659 if (SplatIndex == -1)
21670 "Expected a splat mask");
21674 assert(VT ==
InnerShuf->getValueType(0) &&
"Expected matching shuffle types");
21693 for (
int i = 0; i !=
MaskSize; ++i) {
21694 if (Mask[i] >= 0 && Mask[i] <
MaskSize) {
21699 }
else if (Mask[i] != i +
MaskSize) {
21736 "Shuffle mask value must be from operand 0");
21766 if (!
Shuf0 || !
Shuf->getOperand(1).isUndef())
21771 for (
int i = 0, e = (
int)Mask.size(); i != e; ++i) {
21775 assert(Mask[i] >= 0 && Mask[i] < e &&
"Unexpected shuffle mask value");
21784 return Shuf->getOperand(0);
21788 EVT VT =
N->getValueType(0);
21812 if (
N1.isUndef()) {
21815 for (
unsigned i = 0; i !=
NumElts; ++i) {
21816 int Idx =
SVN->getMaskElt(i);
21839 if (
SVN->isSplat() &&
SVN->getSplatIndex() < (
int)
NumElts) {
21840 int SplatIndex =
SVN->getSplatIndex();
21864 if (
ConvInput.getValueType().isVector() &&
21871 "BUILD_VECTOR has wrong number of operands");
21874 for (
unsigned i = 0; i !=
NumElts; ++i) {
21875 if (!V->getOperand(i).isUndef()) {
21876 Base = V->getOperand(i);
21881 if (!
Base.getNode())
21883 for (
unsigned i = 0; i !=
NumElts; ++i) {
21884 if (V->getOperand(i) !=
Base) {
21900 if (V->getValueType(0) != VT)
21940 for (
unsigned i = 0; i !=
NumElts; ++i) {
21941 int Idx =
SVN->getMaskElt(i);
21965 EVT SubVT = RHS.getOperand(0).getValueType();
21991 for (
int i = 0; i != (int)
NumElts; ++i) {
22045 0 == (
SVT.getSizeInBits() %
ScaleSVT.getSizeInBits())) {
22098 for (
unsigned i = 0; i !=
NumElts; ++i) {
22099 int Idx =
SVN->getMaskElt(i);
22128 Mask.push_back(-1);
22155 Mask.push_back(-1);
22162 Mask.push_back(-1);
22190 if (TLI.isShuffleMaskLegal(Mask, VT))
22195 return TLI.isShuffleMaskLegal(Mask, VT);
22207 assert(
N1->getOperand(0).getValueType() == VT &&
22208 "Shuffle types don't match");
22235 for (
int i = 0; i != 2; ++i) {
22237 N->isOnlyUserOf(
N->getOperand(i).getNode())) {
22242 "Shuffle types don't match");
22253 SV0 ?
SV0 : DAG.getUNDEF(VT),
22264 if (TLI.isBinOp(SrcOpcode) &&
N->isOnlyUserOf(N0.
getNode()) &&
22266 (SrcOpcode ==
N1.getOpcode() &&
N->isOnlyUserOf(
N1.getNode())))) {
22275 if (
Op00.getValueType() == VT &&
Op10.getValueType() == VT &&
22276 Op01.getValueType() == VT &&
Op11.getValueType() == VT &&
22331 return DAG.
getNode(SrcOpcode,
DL, VT, LHS, RHS);
22345 EVT VT =
N->getValueType(0);
22351 InVal->getOperand(0).getValueType().isFixedLengthVector()) {
22357 int Elt =
C0->getZExtValue();
22362 InVal.getValueType().isScalarInteger() &&
22382 InVecT.getVectorElementType(),
22396 EVT VT =
N->getValueType(0);
22409 N1.getOperand(1) == N2 &&
N1.getOperand(0).getValueType() == VT)
22410 return N1.getOperand(0);
22418 N1.getOperand(0).getOperand(1) == N2 &&
22419 N1.getOperand(0).getOperand(0).getValueType().getVectorElementCount() ==
22421 N1.getOperand(0).getOperand(0).getValueType().getSizeInBits() ==
22423 return DAG.
getBitcast(VT,
N1.getOperand(0).getOperand(0));
22436 CN0VT.getVectorElementType() ==
CN1VT.getVectorElementType() &&
22459 N1.getOperand(1), N2);
22471 N0Src.getValueType().isVector() &&
N1Src.getValueType().isVector()) {
22484 if (
NumElts.isKnownMultipleOf(Scale) && (
InsIdx % Scale) == 0) {
22486 NumElts.divideCoefficientBy(Scale));
22509 AddToWorklist(
NewOp.getNode());
22520 N1.getValueType().isScalableVector()) {
22521 unsigned Factor =
N1.getValueType().getVectorMinNumElements();
22562 unsigned Opcode =
N->getOpcode();
22580 if (!TLI.isOperationLegalOrCustom(Opcode, VT) &&
22581 TLI.isOperationLegalOrCustom(
NewOpcode, VT) &&
22606 return DAG.
getUNDEF(
N->getValueType(0));
22611 if (
MemSD->writeMem())
22612 return MemSD->getChain();
22613 return CombineTo(
N, DAG.
getUNDEF(
N->getValueType(0)),
MemSD->getChain());
22618 return N->getOperand(0);
22630 EVT VT =
N->getValueType(0);
22637 if (LegalOperations)
22643 EVT RVT = RHS.getValueType();
22644 unsigned NumElts = RHS.getNumOperands();
22656 int SubIdx = i % Split;
22660 if (
Elt.isUndef()) {
22679 if (
Bits.isAllOnes())
22680 Indices.push_back(i);
22681 else if (Bits == 0)
22690 if (!TLI.isVectorClearMaskLegal(Indices,
ClearVT))
22701 if (
RVT.getScalarSizeInBits() % 8 == 0)
22704 for (
int Split = 1; Split <=
MaxSplit; ++Split)
22705 if (
RVT.getScalarSizeInBits() % Split == 0)
22718 unsigned Opcode =
N->getOpcode();
22719 EVT VT =
N->getValueType(0);
22760 EVT VT =
N->getValueType(0);
22761 assert(VT.
isVector() &&
"SimplifyVBinOp only works on vectors!");
22765 unsigned Opcode =
N->getOpcode();
22781 LHS.getOperand(1).isUndef() && RHS.getOperand(1).isUndef() &&
22782 (LHS.hasOneUse() || RHS.hasOneUse() || LHS == RHS)) {
22784 RHS.getOperand(0), Flags);
22796 Shuf0->hasOneUse() &&
Shuf0->getOperand(1).isUndef() &&
22805 Shuf1->hasOneUse() &&
Shuf1->getOperand(1).isUndef() &&
22821 LHS.getOperand(2) == RHS.getOperand(2) &&
22822 (LHS.hasOneUse() || RHS.hasOneUse())) {
22828 TLI.isOperationLegalOrCustomOrPromote(Opcode,
NarrowVT,
22829 LegalOperations)) {
22842 return Op.isUndef() ||
22843 ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
22853 (LHS.hasOneUse() || RHS.hasOneUse())) {
22855 if (
NarrowVT == RHS.getOperand(0).getValueType() &&
22856 TLI.isOperationLegalOrCustomOrPromote(Opcode,
NarrowVT)) {
22857 unsigned NumOperands = LHS.getNumOperands();
22859 for (
unsigned i = 0; i != NumOperands; ++i) {
22862 RHS.getOperand(i)));
22885 if (
SCC.getNode()) {
22892 SCC.getOperand(0),
SCC.getOperand(1),
22893 SCC.getOperand(4), Flags);
22894 AddToWorklist(
SETCC.getNode());
22896 SCC.getOperand(2),
SCC.getOperand(3));
22916 if (NaN->isNaN() && RHS.getOpcode() ==
ISD::FSQRT) {
22932 CmpLHS =
Cmp.getOperand(0);
22936 if (Zero && Zero->
isZero() &&
22946 if (
TheSelect->getOperand(0).getValueType().isVector())
return false;
22950 if (LHS.getOpcode() != RHS.getOpcode() ||
22951 !LHS.hasOneUse() || !RHS.hasOneUse())
22963 if (LHS.getOperand(0) != RHS.getOperand(0) ||
22967 !
LLD->isSimple() || !
RLD->isSimple() ||
22970 LLD->isIndexed() ||
RLD->isIndexed() ||
22972 LLD->getMemoryVT() !=
RLD->getMemoryVT() ||
22974 (
LLD->getExtensionType() !=
RLD->getExtensionType() &&
22983 LLD->getPointerInfo().getAddrSpace() != 0 ||
22984 RLD->getPointerInfo().getAddrSpace() != 0 ||
22989 !TLI.isOperationLegalOrCustom(
TheSelect->getOpcode(),
22990 LLD->getBasePtr().getValueType()))
22994 if (
LLD->isPredecessorOf(
RLD) ||
RLD->isPredecessorOf(
LLD))
23008 Worklist.push_back(
LLD);
23009 Worklist.push_back(
RLD);
23025 if ((
LLD->hasAnyUseOfValue(1) &&
23027 (
RLD->hasAnyUseOfValue(1) &&
23032 LLD->getBasePtr().getValueType(),
23034 RLD->getBasePtr());
23047 if ((
LLD->hasAnyUseOfValue(1) &&
23049 (
RLD->hasAnyUseOfValue(1) &&
23054 LLD->getBasePtr().getValueType(),
23057 LLD->getBasePtr(),
RLD->getBasePtr(),
23065 Align Alignment = std::min(
LLD->getAlign(),
RLD->getAlign());
23067 if (!
RLD->isInvariant())
23069 if (!
RLD->isDereferenceable())
23090 CombineTo(LHS.getNode(),
Load.getValue(0),
Load.getValue(1));
23091 CombineTo(RHS.getNode(),
Load.getValue(0),
Load.getValue(1));
23115 if (CC ==
ISD::SETGT && TLI.hasAndNot(N2)) {
23133 if (
N2C && ((
N2C->getAPIntValue() & (
N2C->getAPIntValue() - 1)) == 0)) {
23134 unsigned ShCt =
XType.getSizeInBits() -
N2C->getAPIntValue().logBase2() - 1;
23135 if (!TLI.shouldAvoidTransformToShift(
XType,
ShCt)) {
23138 AddToWorklist(Shift.
getNode());
23142 AddToWorklist(Shift.
getNode());
23152 unsigned ShCt =
XType.getSizeInBits() - 1;
23153 if (TLI.shouldAvoidTransformToShift(
XType,
ShCt))
23158 AddToWorklist(Shift.
getNode());
23162 AddToWorklist(Shift.
getNode());
23176 EVT VT =
N->getValueType(0);
23209 VT ==
N1.getOperand(1).getValueType() &&
23226 EVT VT =
N->getValueType(0);
23228 bool IsFree =
IsFabs ? TLI.isFAbsFree(VT) : TLI.isFNegFree(VT);
23248 SignMask = ~SignMask;
23254 SignMask = ~SignMask;
23258 DAG.getConstant(SignMask,
DL,
IntVT));
23259 AddToWorklist(
Int.getNode());
23267SDValue DAGCombiner::convertSelectOfFPConstantsToLoadOffset(
23270 if (!TLI.reduceSelectOfFPConstantLoads(N0.
getValueType()))
23278 if (!
TV || !
FV || !TLI.isTypeLegal(VT))
23283 TLI.isFPImmLegal(
TV->getValueAPF(),
TV->getValueType(0), ForCodeSize) ||
23284 TLI.isFPImmLegal(
FV->getValueAPF(),
FV->getValueType(0), ForCodeSize))
23289 if (!
TV->hasOneUse() && !
FV->hasOneUse())
23310 AddToWorklist(
Cond.getNode());
23314 AddToWorklist(
CPIdx.getNode());
23326 if (N2 == N3)
return N2;
23337 AddToWorklist(
SCC.getNode());
23341 return !(
SCCC->isZero()) ? N2 : N3;
23366 if (!TLI.shouldAvoidTransformToShift(VT,
ShCt)) {
23388 if ((Fold || Swap) &&
23389 TLI.getBooleanContents(
CmpOpVT) ==
23416 AddToWorklist(
SCC.getNode());
23417 AddToWorklist(Temp.
getNode());
23422 unsigned ShCt =
N2C->getAPIntValue().logBase2();
23423 if (TLI.shouldAvoidTransformToShift(VT,
ShCt))
23454 (!LegalOperations || TLI.isOperationLegal(
ISD::CTTZ, VT)))
23461 (!LegalOperations || TLI.isOperationLegal(
ISD::CTLZ, VT)))
23470 N2C->getAPIntValue() == ~
N3C->getAPIntValue() &&
23473 !TLI.shouldAvoidTransformToShift(VT,
CmpOpVT.getScalarSizeInBits() - 1)) {
23509 if (
SDValue S = TLI.BuildSDIV(
N, DAG, LegalOperations,
Built)) {
23530 if (
SDValue S = TLI.BuildSDIVPow2(
N,
C->getAPIntValue(), DAG,
Built)) {
23550 if (
SDValue S = TLI.BuildUDIV(
N, DAG, LegalOperations,
Built)) {
23562 EVT VT = V.getValueType();
23583 EVT VT =
Op.getValueType();
23590 int Enabled = TLI.getRecipEstimateDivEnabled(VT, MF);
23591 if (
Enabled == TLI.ReciprocalEstimate::Disabled)
23596 int Iterations = TLI.getDivRefinementSteps(VT, MF);
23598 AddToWorklist(
Est.getNode());
23606 for (
int i = 0; i < Iterations; ++i) {
23609 if (i == Iterations - 1) {
23611 AddToWorklist(
MulEst.getNode());
23615 AddToWorklist(
NewEst.getNode());
23619 AddToWorklist(
NewEst.getNode());
23622 AddToWorklist(
NewEst.getNode());
23625 AddToWorklist(
Est.getNode());
23630 AddToWorklist(
Est.getNode());
23646 unsigned Iterations,
23648 EVT VT =
Arg.getValueType();
23658 for (
unsigned i = 0; i < Iterations; ++i) {
23678 unsigned Iterations,
23680 EVT VT =
Arg.getValueType();
23691 for (
unsigned i = 0; i < Iterations; ++i) {
23723 EVT VT =
Op.getValueType();
23730 int Enabled = TLI.getRecipEstimateSqrtEnabled(VT, MF);
23731 if (
Enabled == TLI.ReciprocalEstimate::Disabled)
23736 int Iterations = TLI.getSqrtRefinementSteps(VT, MF);
23742 AddToWorklist(
Est.getNode());
23775bool DAGCombiner::mayAlias(
SDNode *Op0,
SDNode *Op1)
const {
23791 ?
C->getSExtValue()
23793 ? -1 *
C->getSExtValue()
23797 return {
LSN->isVolatile(),
LSN->isAtomic(),
LSN->getBasePtr(),
23800 LSN->getMemOperand()};
23803 return {
false ,
false,
LN->getOperand(1),
23804 (
LN->hasOffset()) ?
LN->getOffset() : 0,
23809 return {
false ,
false,
SDValue(),
23815 MUC1 = getCharacteristics(Op1);
23818 if (
MUC0.BasePtr.getNode() &&
MUC0.BasePtr ==
MUC1.BasePtr &&
23823 if (
MUC0.IsVolatile &&
MUC1.IsVolatile)
23828 if (
MUC0.IsAtomic &&
MUC1.IsAtomic)
23832 if ((
MUC0.MMO->isInvariant() &&
MUC1.MMO->isStore()) ||
23833 (
MUC1.MMO->isInvariant() &&
MUC0.MMO->isStore()))
23852 if ((
MUC0.MMO->isInvariant() &&
MUC1.MMO->isStore()) ||
23853 (
MUC1.MMO->isInvariant() &&
MUC0.MMO->isStore()))
23866 auto &Size1 =
MUC1.NumBytes;
23868 Size0.hasValue() && Size1.hasValue() && *
Size0 == *Size1 &&
23889 if (
UseAA && AA &&
MUC0.MMO->getValue() &&
MUC1.MMO->getValue() &&
23890 Size0.hasValue() && Size1.hasValue()) {
23920 unsigned Depth = 0;
23924 switch (
C.getOpcode()) {
23937 C =
C.getOperand(0);
23946 C =
C.getOperand(0);
23955 C =
C.getOperand(0);
23968 while (!Chains.empty()) {
23969 SDValue Chain = Chains.pop_back_val();
23972 if (!Visited.insert(Chain.getNode()).second)
23981 if (
Depth > TLI.getGatherAllAliasesMaxDepth()) {
23992 if (Chain.getNumOperands() > 16) {
23993 Aliases.push_back(Chain);
23996 for (
unsigned n = Chain.getNumOperands();
n;)
23997 Chains.push_back(Chain.getOperand(--
n));
24004 if (Chain.getNode())
24005 Chains.push_back(Chain);
24010 Aliases.push_back(Chain);
24027 if (Aliases.size() == 0)
24031 if (Aliases.size() == 1)
24040struct UnitT { } Unit;
24041bool operator==(
const UnitT &,
const UnitT &) {
return true; }
24042bool operator!=(
const UnitT &,
const UnitT &) {
return false; }
24058bool DAGCombiner::parallelizeChainedStores(
StoreSDNode *
St) {
24075 if (!
BasePtr.getBase().getNode())
24079 if (
BasePtr.getBase().isUndef())
24083 if (
St->getMemoryVT().isZeroSized())
24089 if (
St->getMemoryVT().isScalableVector())
24093 Intervals.insert(0, (
St->getMemoryVT().getSizeInBits() + 7) / 8, Unit);
24096 if (Chain->getMemoryVT().isScalableVector())
24103 if (!Chain->isSimple() || Chain->isIndexed())
24112 int64_t Length = (Chain->getMemoryVT().getSizeInBits() + 7) / 8;
24115 auto I = Intervals.find(
Offset);
24117 if (
I != Intervals.end() &&
I.start() < (
Offset + Length))
24120 if (
I != Intervals.begin() && (--
I).stop() <=
Offset)
24148 if (
St->isTruncatingStore())
24150 St->getBasePtr(),
St->getMemoryVT(),
24151 St->getMemOperand());
24154 St->getBasePtr(),
St->getMemOperand());
24174 AddToWorklist(
Op.getNode());
24179bool DAGCombiner::findBetterNeighborChains(
StoreSDNode *
St) {
24186 if (!
BasePtr.getBase().getNode())
24190 if (
BasePtr.getBase().isUndef())
24210 DAGCombiner(*
this, AA, OptLevel).Run(Level);
static bool mayAlias(MachineInstr &MIa, SmallVectorImpl< MachineInstr * > &MemInsns, AliasAnalysis *AA)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< bool > UseAA("aarch64-use-aa", cl::init(true), cl::desc("Enable the use of AA during codegen."))
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static uint64_t * getMemory(unsigned numWords)
A utility function for allocating memory and checking for allocation failure.
This file implements a class to represent arbitrary precision integral constant values and operations...
This file contains the simple types necessary to represent the attributes associated with functions a...
SmallVector< MachineOperand, 4 > Cond
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< ShadowStackGC > C("shadow-stack", "Very portable GC for uncooperative code generators")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL, const TargetLowering &TLI)
For the instruction sequence of store below, F and I values are bundled together as an i64 value befo...
static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I)
static Optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I)
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
static bool isAnyConstantBuildVector(SDValue V, bool NoOpaques=false)
static cl::opt< bool > EnableShrinkLoadReplaceStoreWithStore("combiner-shrink-load-replace-store-with-store", cl::Hidden, cl::init(true), cl::desc("DAG combiner enable load/<replace bytes>/store with " "a narrower store"))
static bool ExtendUsesToFormExtLoad(EVT VT, SDNode *N, SDValue N0, unsigned ExtOpc, SmallVectorImpl< SDNode * > &ExtendNodes, const TargetLowering &TLI)
static bool CanCombineFCOPYSIGN_EXTEND_ROUND(SDNode *N)
copysign(x, fp_extend(y)) -> copysign(x, y) copysign(x, fp_round(y)) -> copysign(x,...
static cl::opt< unsigned > TokenFactorInlineLimit("combiner-tokenfactor-inline-limit", cl::Hidden, cl::init(2048), cl::desc("Limit the number of operands to inline for Token Factors"))
static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG)
static SDNode * getBuildPairElt(SDNode *N, unsigned i)
static SDValue scalarizeBinOpOfSplats(SDNode *N, SelectionDAG &DAG, const SDLoc &DL)
If a vector binop is performed on splat values, it may be profitable to extract, scalarize,...
static SDValue extractShiftForRotate(SelectionDAG &DAG, SDValue OppShift, SDValue ExtractFrom, SDValue &Mask, const SDLoc &DL)
Helper function for visitOR to extract the needed side of a rotate idiom from a shl/srl/mul/udiv.
static bool getCombineLoadStoreParts(SDNode *N, unsigned Inc, unsigned Dec, bool &IsLoad, bool &IsMasked, SDValue &Ptr, const TargetLowering &TLI)
static SDValue tryToFoldExtOfMaskedLoad(SelectionDAG &DAG, const TargetLowering &TLI, EVT VT, SDNode *N, SDValue N0, ISD::LoadExtType ExtLoadType, ISD::NodeType ExtOpc)
static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned, const TargetLowering &TLI)
Return true if divmod libcall is available.
static SDValue reduceBuildVecToShuffleWithZero(SDNode *BV, SelectionDAG &DAG)
static SDValue foldAddSubMasked1(bool IsAdd, SDValue N0, SDValue N1, SelectionDAG &DAG, const SDLoc &DL)
Given the operands of an add/sub operation, see if the 2nd operand is a masked 0/1 whose source opera...
static SDValue simplifyShuffleOfShuffle(ShuffleVectorSDNode *Shuf)
If we have a unary shuffle of a shuffle, see if it can be folded away completely.
static bool canSplitIdx(LoadSDNode *LD)
static SDValue ShrinkLoadReplaceStoreWithStore(const std::pair< unsigned, unsigned > &MaskInfo, SDValue IVal, StoreSDNode *St, DAGCombiner *DC)
Check to see if IVal is something that provides a value as specified by MaskInfo.
static cl::opt< bool > StressLoadSlicing("combiner-stress-load-slicing", cl::Hidden, cl::desc("Bypass the profitability model of load slicing"), cl::init(false))
Hidden option to stress test load slicing, i.e., when this option is enabled, load slicing bypasses m...
static cl::opt< bool > UseTBAA("combiner-use-tbaa", cl::Hidden, cl::init(true), cl::desc("Enable DAG combiner's use of TBAA"))
static void adjustCostForPairing(SmallVectorImpl< LoadedSlice > &LoadedSlices, LoadedSlice::Cost &GlobalLSCost)
Adjust the GlobalLSCost according to the target paring capabilities and the layout of the slices.
static SDValue narrowInsertExtractVectorBinOp(SDNode *Extract, SelectionDAG &DAG, bool LegalOperations)
static const Optional< ByteProvider > calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth, bool Root=false)
Recursively traverses the expression calculating the origin of the requested byte of the given value.
static bool matchRotateHalf(SelectionDAG &DAG, SDValue Op, SDValue &Shift, SDValue &Mask)
Match "(X shl/srl V1) & V2" where V2 may not be present.
static bool isCompatibleLoad(SDValue N, unsigned ExtOpcode)
Check if N satisfies: N is used once.
static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG)
static SDValue narrowExtractedVectorBinOp(SDNode *Extract, SelectionDAG &DAG, bool LegalOperations)
If we are extracting a subvector produced by a wide binary operator try to use a narrow binary operat...
static bool areUsedBitsDense(const APInt &UsedBits)
Check that all bits set in UsedBits form a dense region, i.e., UsedBits looks like 0....
static SDValue getInputChainForNode(SDNode *N)
Given a node, return its input chain if it has one, otherwise return a null sd operand.
static SDValue narrowExtractedVectorLoad(SDNode *Extract, SelectionDAG &DAG)
If we are extracting a subvector from a wide vector load, convert to a narrow load to eliminate the e...
static ElementCount numVectorEltsOrZero(EVT T)
static SDValue foldSelectWithIdentityConstant(SDNode *N, SelectionDAG &DAG, bool ShouldCommuteOperands)
This inverts a canonicalization in IR that replaces a variable select arm with an identity constant.
static SDValue widenCtPop(SDNode *Extend, SelectionDAG &DAG)
Given an extending node with a pop-count operand, if the target does not support a pop-count in the n...
static SDValue foldSelectOfConstantsUsingSra(SDNode *N, SelectionDAG &DAG)
If a (v)select has a condition value that is a sign-bit test, try to smear the condition operand sign...
static SDValue combineADDCARRYDiamond(DAGCombiner &Combiner, SelectionDAG &DAG, SDValue X, SDValue Carry0, SDValue Carry1, SDNode *N)
If we are facing some sort of diamond carry propapagtion pattern try to break it up to generate somet...
static SDValue replaceShuffleOfInsert(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG)
If a shuffle inserts exactly one element from a source vector operand into another vector operand and...
static SDValue tryToFoldExtOfExtload(SelectionDAG &DAG, DAGCombiner &Combiner, const TargetLowering &TLI, EVT VT, bool LegalOperations, SDNode *N, SDValue N0, ISD::LoadExtType ExtLoadType)
static cl::opt< bool > CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden, cl::desc("Enable DAG combiner's use of IR alias analysis"))
static bool isConstantSplatVectorMaskForType(SDNode *N, EVT ScalarTy)
static SDValue formSplatFromShuffles(ShuffleVectorSDNode *OuterShuf, SelectionDAG &DAG)
Combine shuffle of shuffle of the form: shuf (shuf X, undef, InnerMask), undef, OuterMask --> splat X...
static bool canFoldInAddressingMode(SDNode *N, SDNode *Use, SelectionDAG &DAG, const TargetLowering &TLI)
Return true if 'Use' is a load or a store that uses N as its base pointer and that N may be folded in...
bool refineUniformBase(SDValue &BasePtr, SDValue &Index, SelectionDAG &DAG)
static SDValue foldExtractSubvectorFromShuffleVector(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI, bool LegalOperations)
Given EXTRACT_SUBVECTOR(VECTOR_SHUFFLE(Op0, Op1, Mask)), try to produce VECTOR_SHUFFLE(EXTRACT_SUBVEC...
static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG)
static SDValue scalarizeExtractedBinop(SDNode *ExtElt, SelectionDAG &DAG, bool LegalOperations)
Transform a vector binary operation into a scalar binary operation by moving the math/logic after an ...
static bool hasNoInfs(const TargetOptions &Options, SDValue N)
static SDValue isSaturatingMinMax(SDValue N0, SDValue N1, SDValue N2, SDValue N3, ISD::CondCode CC, unsigned &BW, bool &Unsigned)
static SDValue combineShuffleToVectorExtend(ShuffleVectorSDNode *SVN, SelectionDAG &DAG, const TargetLowering &TLI, bool LegalOperations)
static SDValue PerformUMinFpToSatCombine(SDValue N0, SDValue N1, SDValue N2, SDValue N3, ISD::CondCode CC, SelectionDAG &DAG)
static SDValue tryToFoldExtendSelectLoad(SDNode *N, const TargetLowering &TLI, SelectionDAG &DAG)
Fold (sext (select c, load x, load y)) -> (select c, sextload x, sextload y) (zext (select c,...
static SDValue foldAndToUsubsat(SDNode *N, SelectionDAG &DAG)
For targets that support usubsat, match a bit-hack form of that operation that ends in 'and' and conv...
static SDValue stripTruncAndExt(SDValue Value)
static SDValue foldShuffleOfConcatUndefs(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG)
Try to convert a wide shuffle of concatenated vectors into 2 narrow shuffles followed by concatenatio...
static SDValue combineShuffleOfSplatVal(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG)
static auto getFirstIndexOf(R &&Range, const T &Val)
bool refineIndexType(MaskedGatherScatterSDNode *MGS, SDValue &Index, bool Scaled, SelectionDAG &DAG)
static std::pair< unsigned, unsigned > CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain)
Check to see if V is (and load (ptr), imm), where the load is having specific bytes cleared out.
static SDValue foldAddSubOfSignBit(SDNode *N, SelectionDAG &DAG)
Try to fold a 'not' shifted sign-bit with add/sub with constant operand into a shift and add with a d...
static int getShuffleMaskIndexOfOneElementFromOp0IntoOp1(ArrayRef< int > Mask)
If the shuffle mask is taking exactly one element from the first vector operand and passing through a...
static cl::opt< bool > EnableStoreMerging("combiner-store-merging", cl::Hidden, cl::init(true), cl::desc("DAG combiner enable merging multiple stores " "into a wider store"))
static bool isContractableFMUL(const TargetOptions &Options, SDValue N)
static cl::opt< bool > MaySplitLoadIndex("combiner-split-load-index", cl::Hidden, cl::init(true), cl::desc("DAG combiner may split indexing from loads"))
static bool areSlicesNextToEachOther(const LoadedSlice &First, const LoadedSlice &Second)
Check whether or not First and Second are next to each other in memory.
static bool isBSwapHWordPair(SDValue N, MutableArrayRef< SDNode * > Parts)
static SDValue foldFPToIntToFP(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getTruncatedUSUBSAT(EVT DstVT, EVT SrcVT, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &DL)
static SDValue foldAddSubBoolOfMaskedVal(SDNode *N, SelectionDAG &DAG)
static SDValue foldBoolSelectToLogic(SDNode *N, SelectionDAG &DAG)
static SDNode * getPostIndexedLoadStoreOp(SDNode *N, bool &IsLoad, bool &IsMasked, SDValue &Ptr, SDValue &BasePtr, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG, const TargetLowering &TLI)
static bool isLegalToCombineMinNumMaxNum(SelectionDAG &DAG, SDValue LHS, SDValue RHS, const TargetLowering &TLI)
static SDValue extractBooleanFlip(SDValue V, SelectionDAG &DAG, const TargetLowering &TLI, bool Force)
Flips a boolean if it is cheaper to compute.
static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op, KnownBits &Known)
static SDValue getSubVectorSrc(SDValue V, SDValue Index, EVT SubVT)
static SDValue getAsCarry(const TargetLowering &TLI, SDValue V)
static SDValue combineShiftOfShiftedLogic(SDNode *Shift, SelectionDAG &DAG)
If we have a shift-by-constant of a bitwise logic op that itself has a shift-by-constant operand with...
static SDValue foldBitcastedFPLogic(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue combineCarryDiamond(SelectionDAG &DAG, const TargetLowering &TLI, SDValue Carry0, SDValue Carry1, SDNode *N)
static void zeroExtendToMatch(APInt &LHS, APInt &RHS, unsigned Offset=0)
static ConstantSDNode * getAsNonOpaqueConstant(SDValue N)
If N is a ConstantSDNode with isOpaque() == false return it casted to a ConstantSDNode pointer else n...
static SDValue combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode CC, const TargetLowering &TLI, SelectionDAG &DAG)
Generate Min/Max node.
static SDValue combineShiftToMULH(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue PerformMinMaxFpToSatCombine(SDValue N0, SDValue N1, SDValue N2, SDValue N3, ISD::CondCode CC, SelectionDAG &DAG)
static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize, SelectionDAG &DAG, bool IsRotate)
static SDValue visitORCommutative(SelectionDAG &DAG, SDValue N0, SDValue N1, SDNode *N)
OR combines for which the commuted variant will be tried as well.
static cl::opt< bool > EnableReduceLoadOpStoreWidth("combiner-reduce-load-op-store-width", cl::Hidden, cl::init(true), cl::desc("DAG combiner enable reducing the width of load/op/store " "sequence"))
static bool shouldCombineToPostInc(SDNode *N, SDValue Ptr, SDNode *PtrUse, SDValue &BasePtr, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI, SelectionDAG &DAG, bool LegalTypes)
Try to fold a sext/zext/aext dag node into a ConstantSDNode or a build_vector of constants.
static SDValue foldExtendedSignBitTest(SDNode *N, SelectionDAG &DAG, bool LegalOperations)
static SDValue combineConcatVectorOfCasts(SDNode *N, SelectionDAG &DAG)
static SDValue combineShiftAnd1ToBitTest(SDNode *And, SelectionDAG &DAG)
Try to replace shift/logic that tests if a bit is clear with mask + setcc.
static SDValue matchBSwapHWordOrAndAnd(const TargetLowering &TLI, SelectionDAG &DAG, SDNode *N, SDValue N0, SDValue N1, EVT VT, EVT ShiftAmountTy)
static SDValue stripConstantMask(SelectionDAG &DAG, SDValue Op, SDValue &Mask)
static SDValue combineShuffleOfScalars(ShuffleVectorSDNode *SVN, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG)
static SDValue foldVSelectToSignBitSplatMask(SDNode *N, SelectionDAG &DAG)
static SDValue combineConcatVectorOfConcatVectors(SDNode *N, SelectionDAG &DAG)
static SDValue tryToFoldExtOfLoad(SelectionDAG &DAG, DAGCombiner &Combiner, const TargetLowering &TLI, EVT VT, bool LegalOperations, SDNode *N, SDValue N0, ISD::LoadExtType ExtLoadType, ISD::NodeType ExtOpc)
static unsigned getPPCf128HiElementSelector(const SelectionDAG &DAG)
static SDValue combineTruncationShuffle(ShuffleVectorSDNode *SVN, SelectionDAG &DAG)
static SDValue tryFoldToZero(const SDLoc &DL, const TargetLowering &TLI, EVT VT, SelectionDAG &DAG, bool LegalOperations)
static cl::opt< unsigned > StoreMergeDependenceLimit("combiner-store-merge-dependence-limit", cl::Hidden, cl::init(10), cl::desc("Limit the number of times for the same StoreNode and RootNode " "to bail out in store merging dependence check"))
static SDValue combineABSToABD(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI)
static cl::opt< std::string > CombinerAAOnlyFunc("combiner-aa-only-func", cl::Hidden, cl::desc("Only use DAG-combiner alias analysis in this" " function"))
static bool isSlicingProfitable(SmallVectorImpl< LoadedSlice > &LoadedSlices, const APInt &UsedBits, bool ForCodeSize)
Check the profitability of all involved LoadedSlice.
static bool isBSwapHWordElement(SDValue N, MutableArrayRef< SDNode * > Parts)
Return true if the specified node is an element that makes up a 32-bit packed halfword byteswap.
static SDValue FoldIntToFPToInt(SDNode *N, SelectionDAG &DAG)
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
PropagateLiveness - Given that RA is a live value, propagate its liveness to any other values it uses (according to Uses). [void DeadArgumentEliminationPass]
static ManagedStatic< DebugCounter > DC
This file defines the DenseMap class.
Optional< std::vector< StOtherPiece > > Other
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
iv Induction Variable Users
static Value * simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q)
Check for common or similar folds of integer division or integer remainder.
This file implements a coalescing interval map for small objects.
static void removeFromWorklist(Instruction *I, std::vector< Instruction * > &Worklist)
Remove all instances of I from the worklist vector specified.
unsigned const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
This file provides None, an enumerator for use in implicit constructors of various (usually templated...
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
This file provides Optional, a template class modeled in the spirit of OCaml's 'opt' variant.
const char LLVMTargetMachineRef LLVMPassBuilderOptionsRef Options
static cl::opt< bool > Aggressive("aggressive-ext-opt", cl::Hidden, cl::desc("Aggressive extension optimization"))
static StringRef getExtensionType(StringRef Ext)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isSimple(Instruction *I)
static cl::opt< bool > UseTBAA("use-tbaa-in-sched-mi", cl::Hidden, cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction"))
This file implements a set that has insertion order iteration characteristics.
This file implements the SmallBitVector class.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
This file describes how to lower LLVM code to machine code.
static Optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static bool hasOneUse(unsigned Reg, MachineInstr *Def, MachineRegisterInfo &MRI, MachineDominatorTree &MDT, LiveIntervals &LIS)
static constexpr int Concat[]
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
APInt trunc(unsigned width) const
Truncate to new width.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
unsigned countLeadingZeros() const
The APInt version of the countLeadingZeros functions in MathExtras.h.
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
bool getBoolValue() const
Convert APInt to a boolean value.
bool isMask(unsigned numBits) const
APInt sext(unsigned width) const
Sign extend to a new width.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
unsigned countTrailingOnes() const
Count the number of trailing one bits.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
static bool computeAliasing(const SDNode *Op0, const Optional< int64_t > NumBytes0, const SDNode *Op1, const Optional< int64_t > NumBytes1, const SelectionDAG &DAG, bool &IsAlias)
static BaseIndexOffset match(const SDNode *N, const SelectionDAG &DAG)
Parses tree in N for base, index, offset addresses.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
ISD::CondCode get() const
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
const APFloat & getValueAPF() const
bool isZero() const
Return true if the value is positive or negative zero.
ConstantFP - Floating Point Values [float, double].
const ConstantInt * getConstantIntValue() const
uint64_t getZExtValue() const
const APInt & getAPIntValue() const
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
bool isScalar() const
Counting predicates.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
AttributeList getAttributes() const
Return the attribute list for this Function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
This class is used to form a handle around another node that is persistent and is updated across invo...
This is an important class for using LLVM in a threaded context.
Base class for LoadSDNode and StoreSDNode.
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
bool isScalable() const
Returns whether the size is scaled by a runtime quantity (vscale).
static ElementCount getFixed(ScalarTy MinVal)
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
static MVT getIntegerVT(unsigned BitWidth)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
Function & getFunction()
Return the LLVM function that this machine code represents.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value,.
This class is used to represent an MGATHER node.
This is a base class used to represent MGATHER and MSCATTER nodes.
This class is used to represent an MLOAD node.
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent an MSCATTER node.
This class is used to represent an MSTORE node.
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
const SDValue & getOffset() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
unsigned getAddressSpace() const
Return the address space for the associated pointer.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
Align getOriginalAlign() const
Returns alignment and volatility of the memory access.
bool isSimple() const
Returns true if the memory operation is neither atomic or volatile.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Representation for a specific memory location.
static uint64_t getSizeOrUnknown(const TypeSize &T)
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
void dump() const
Dump this node, for debugging.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
iterator_range< use_iterator > uses()
SDNodeFlags getFlags() const
size_t use_size() const
Return the number of uses of this node.
TypeSize getValueSizeInBits(unsigned ResNo) const
Returns MVT::getSizeInBits(getValueType(ResNo)).
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
bool use_empty() const
Return true if there are no uses of this node.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
const APInt & getConstantOperandAPInt(unsigned Num) const
Helper method returns the APInt of a ConstantSDNode operand.
bool isPredecessorOf(const SDNode *N) const
Return true if this node is a predecessor of N.
bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
op_iterator op_end() const
op_iterator op_begin() const
static use_iterator use_end()
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
unsigned getNumOperands() const
Targets can subclass this to parameterize the SelectionDAG lowering and instruction selection process...
virtual bool disableGenericCombines(CodeGenOpt::Level OptLevel) const
Help to insert SDNodeFlags automatically in transforming.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getSplatSourceVector(SDValue V, int &SplatIndex)
If V is a splatted value, return the source vector and its splat index.
unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
bool isKnownNeverZero(SDValue Op) const
Test whether the given SDValue is known to contain non-zero value(s).
const TargetSubtargetInfo & getSubtarget() const
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl)
Constant fold a setcc to true or false.
SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDNode * isConstantIntBuildVectorOrConstantInt(SDValue N) const
Test whether the given value is a constant int or similar node.
SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=None, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
bool haveNoCommonBitsSet(SDValue A, SDValue B) const
Return true if A and B have no common bits set.
SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
bool shouldOptForSize() const
OverflowKind computeOverflowKind(SDValue N0, SDValue N1) const
Determine if the result of the addition of 2 node can overflow.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
void DeleteNode(SDNode *N)
Remove the specified node from the system.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal)
Try to simplify a select/vselect into 1 of its operands or a constant.
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
SDNode * isConstantFPBuildVectorOrConstantFP(SDValue N) const
Test whether the given value is a constant FP or similar node.
SDValue GetDemandedBits(SDValue V, const APInt &DemandedBits)
See if the specified operand can be simplified with the knowledge that only the bits specified by Dem...
SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
bool LegalizeOp(SDNode *N, SmallSetVector< SDNode *, 16 > &UpdatedNodes)
Transforms a SelectionDAG node and any operands to it into a node that is compatible with the target ...
bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base, unsigned Bytes, int Dist) const
Return true if loads are next to each other and can be merged.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
bool isKnownToBeAPowerOfTwo(SDValue Val) const
Test if the given value is known to have exactly one bit set.
bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly=false, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
static const fltSemantics & EVTToAPFloatSemantics(EVT VT)
Returns an APFloat semantics tag appropriate for the given type.
const TargetMachine & getTarget() const
SDValue getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
bool isKnownNeverNaN(SDValue Op, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue is known to never be NaN.
SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
const TargetLibraryInfo & getLibInfo() const
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
void Combine(CombineLevel Level, AAResults *AA, CodeGenOpt::Level OptLevel)
This iterates over the nodes in the SelectionDAG, folding certain types of nodes together,...
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVMContext * getContext() const
SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y, SDNodeFlags Flags)
Try to simplify a floating-point binary operation into 1 of its operands or a constant.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL, bool LegalTypes=true)
bool isUndef(unsigned Opcode, ArrayRef< SDValue > Ops)
Return true if the result of this operation is always undefined.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDNode * getNodeIfExists(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops, const SDNodeFlags Flags)
Get the specified node if it's already available, or else return NULL.
SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
DenormalMode getDenormalMode(EVT VT) const
Return the current function's default denormal handling kind for the given floating point type.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue simplifyShift(SDValue X, SDValue Y)
Try to simplify a shift into 1 of its operands or a constant.
void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits=0, unsigned SizeInBits=0, bool InvalidateDbg=true)
Transfer debug values from one node to another, while optionally generating fragment expressions for ...
SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
A SetVector that performs no allocations if smaller than a certain size.
This class is used to represent ISD::STORE nodes.
bool has(LibFunc F) const
Tests whether a library function is available.
virtual bool isMulAddWithConstProfitable(const SDValue &AddNode, const SDValue &ConstNode) const
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x,...
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
virtual bool hasAndNot(SDValue X) const
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
virtual bool isShuffleMaskLegal(ArrayRef< int >, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
virtual bool enableAggressiveFMAFusion(EVT VT) const
Return true if target always benefits from combining into FMA for a given value type.
bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed store is legal on this target.
SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool, EVT ValVT) const
Promote the given target boolean to a target boolean of the given type.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT, bool LegalOnly) const
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, bool *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const
Use bitwise logic to make pairs of compares more efficient.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable.
virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG, const MachineMemOperand &MMO) const
Return true if the following transform is beneficial: fold (conv (load x)) -> (load (conv*)x) On arch...
virtual bool hasBitPreservingFPLogic(EVT VT) const
Return true if it is safe to transform an integer-domain bitwise operation into the equivalent floati...
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual bool shouldRemoveExtendFromGSIndex(EVT VT) const
virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode, LLT DestTy, LLT SrcTy) const
Return true if an fpext operation input to an Opcode operation is free (for instance,...
virtual bool hasBitTest(SDValue X, SDValue Y) const
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const
Return true if the specified store with truncation is legal on this target.
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
virtual bool generateFMAsInMachineCombiner(EVT VT, CodeGenOpt::Level OptLevel) const
virtual bool isFPImmLegal(const APFloat &, EVT, bool ForCodeSize=false) const
Returns true if the target can instruction select the specified FP immediate natively.
virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const
Return true if extraction of a scalar element from the given vector type at the given index is cheap.
virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context, EVT VT) const
Returns true if we should normalize select(N0&N1, X, Y) => select(N0, select(N1, X,...
bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed masked load is legal on this target.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?...
virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const
Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const
Return true if it is profitable to fold a pair of shifts into a mask.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const
Return true if creating a shift of the type by the given amount is not profitable.
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
Returns the type for the shift amount of a shift opcode.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT) const
Return true if pulling a binary operation into a select with an identity constant is profitable.
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal on this target.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual bool preferIncOfAddToSubOfNot(EVT VT) const
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
virtual bool isLegalAddImmediate(int64_t) const
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const
Return true if it is profitable to reduce a load to a smaller type.
virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const
virtual bool isFNegFree(EVT VT) const
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool mergeStoresAfterLegalization(EVT MemVT) const
Allow store merging for the specified type after legalization in addition to before legalization.
virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const
Return true if it is cheaper to split the store of a merged int val from a pair of smaller values int...
bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal or custom on this target.
virtual bool storeOfVectorConstantIsCheap(EVT MemVT, unsigned NumElem, unsigned AddrSpace) const
Return true if it is expected to be cheaper to do a store of a non-zero vector constant with the give...
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const
There are two ways to clear extreme bits (either low or high): Mask: x & (-1 << y) (the instcombine c...
bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
virtual bool canMergeStoresTo(unsigned AS, EVT MemVT, const MachineFunction &MF) const
Returns if it's reasonable to merge stores to MemVT size.
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
virtual bool shouldScalarizeBinop(SDValue VecOp) const
Try to convert an extract element of a vector binary operation into an extract element followed by a ...
virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT, const SelectionDAG &DAG, const MachineMemOperand &MMO) const
Return true if the following transform is beneficial: (store (y (conv x)), y*)) -> (store x,...
bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed masked store is legal on this target.
bool hasTargetDAGCombine(ISD::NodeType NT) const
If true, the target has custom DAG combine transformations that it can perform for the specified node...
virtual bool shouldSplatInsEltVarIndex(EVT) const
Return true if inserting a scalar into a variable element of an undef vector is more efficiently hand...
NegatibleCost
Enum that specifies when a float negation is beneficial.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const
Returns true if MI can be combined with another instruction to form TargetOpcode::G_FMAD.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
virtual bool isNarrowingProfitable(EVT, EVT) const
Return true if it's profitable to narrow operations of type VT1 to VT2.
virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace, Instruction *I=nullptr) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
virtual bool hasPairedLoad(EVT, Align &) const
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
virtual bool convertSelectOfConstantsToMath(EVT VT) const
Return true if a select of constants (select Cond, C1, C2) should be transformed into simple math ops...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op.
virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0, SDValue N1) const
SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, unsigned Depth=0) const
This is the helper function to return the newly negated expression only when the cost is cheaper.
SDValue expandABS(SDNode *N, SelectionDAG &DAG, bool IsNegative=false) const
Expand ABS nodes.
virtual bool IsDesirableToPromoteOp(SDValue, EVT &) const
This method query the target whether it is beneficial for dag combiner to promote the specified node.
virtual bool isTypeDesirableForOp(unsigned, EVT VT) const
Return true if the target has native support for the specified value type and it is 'desirable' to us...
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, SDValue N1, MutableArrayRef< int > Mask, SelectionDAG &DAG) const
Tries to build a legal vector shuffle using the provided parameters or equivalent variations.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
bool isConstFalseVal(SDValue N) const
Return if the N is a constant or constant vector equal to the false value from getBooleanContents().
SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::UDIV node expressing a divide by constant, return a DAG expression to select that will ...
virtual SDValue getSqrtResultForDenormInput(SDValue Operand, SelectionDAG &DAG) const
Return a target-dependent result if the input operand is not suitable for use with a square root esti...
virtual bool getPostIndexedAddressParts(SDNode *, SDNode *, SDValue &, SDValue &, ISD::MemIndexedMode &, SelectionDAG &) const
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node ...
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI, const SDLoc &dl) const
Try to simplify a setcc built with the specified operands and cc.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
bool isConstTrueVal(SDValue N) const
Return if the N is a constant or constant vector equal to the true value from getBooleanContents().
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
virtual bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const
Return true if it is profitable to move this shift by a constant amount though its operand,...
virtual unsigned combineRepeatedFPDivisors() const
Indicate whether this target prefers to combine FDIVs with the same divisor.
virtual bool getPreIndexedAddressParts(SDNode *, SDValue &, SDValue &, ISD::MemIndexedMode &, SelectionDAG &) const
Returns true by value, base pointer and offset pointer and addressing mode by reference if the node's...
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::SDIV node expressing a divide by constant, return a DAG expression to select that will ...
virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
virtual bool isDesirableToTransformToIntegerOp(unsigned, EVT) const
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
unsigned NoSignedZerosFPMath
NoSignedZerosFPMath - This flag is enabled when the -enable-no-signed-zeros-fp-math is specified on t...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual bool useAA() const
Enable use of alias analysis during code generation (during MI scheduling, DAGCombine,...
ScalarTy getFixedSize() const
static TypeSize Fixed(ScalarTy MinVal)
The instances of the Type class are immutable: once they are created, they are never changed.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
Value * getOperand(unsigned i) const
This class is used to represent EVT's, which are used to parameterize some operations.
LLVM Value Representation.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< use_iterator > uses()
Implementation for an ilist node.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsVolatile[]
Key for Kernel::Arg::Metadata::mIsVolatile.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
BaseIndexOffset getPointerInfo(Register Ptr, MachineRegisterInfo &MRI)
Returns a BaseIndexOffset which describes the pointer in Ptr.
CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical AND between different comparisons of identical values: ((X op1 Y) & (X...
bool isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are ~0 ...
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ ADDCARRY
Carry-using nodes for multiple precision addition and subtraction.
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ AssertAlign
AssertAlign - These nodes record if a register contains a value that has a known alignment and the tr...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimum or maximum on two values,...
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ LIFETIME_START
This corresponds to the llvm.lifetime.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ HANDLENODE
HANDLENODE node - Used as a handle for various purposes.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ CARRY_FALSE
CARRY_FALSE - This node is used when folding other nodes, like ADDC/SUBC, which indicate the carry re...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant BUI...
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 o...
Optional< unsigned > getVPMaskIdx(unsigned Opcode)
The operand position of the vector mask.
bool isVPBinaryOp(unsigned Opcode)
Whether this is a vector-predicated binary operation opcode.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
Optional< unsigned > getVPExplicitVectorLengthIdx(unsigned Opcode)
The operand position of the explicit vector length parameter.
bool isUNINDEXEDLoad(const SDNode *N)
Returns true if the specified node is an unindexed load.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
bool isVPReduction(unsigned Opcode)
Whether this is a vector-predicated reduction opcode.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
bool isBuildVectorOfConstantFPSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantFPSDNode or undef.
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical OR between different comparisons of identical values: ((X op1 Y) | (X ...
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Undef
Value of the register doesn't matter.
initializer< Ty > init(const Ty &Val)
DiagnosticInfoOptimizationBase::Argument NV
This file defines the SmallVector class.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
bool operator<(int64_t V1, const APSInt &V2)
void stable_sort(R &&Range)
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool operator==(uint64_t V1, const APInt &V2)
bool isConstantOrConstantVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a build vector of constant integers.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
llvm::SmallVector< int, 16 > createUnaryMask(ArrayRef< int > Mask, unsigned NumElts)
Given a shuffle mask for a binary shuffle, create the equivalent shuffle mask assuming both operands ...
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
bool getAlign(const Function &F, unsigned index, unsigned &align)
bool operator!=(uint64_t V1, const APInt &V2)
bool operator>=(int64_t V1, const APSInt &V2)
std::string & operator+=(std::string &buffer, StringRef string)
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
auto reverse(ContainerTy &&C, std::enable_if_t< has_rbegin< ContainerTy >::value > *=nullptr)
Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
unsigned M1(unsigned Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool operator>(int64_t V1, const APSInt &V2)
bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
unsigned countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
unsigned countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1.
detail::ValueMatchesPoly< M > HasValue(M Matcher)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
void sort(IteratorTy Start, IteratorTy End)
SDValue peekThroughOneUseBitcasts(SDValue V)
Return the non-bitcasted and one-use source operand of V if it exists.
bool isAllOnesOrAllOnesSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
unsigned countTrailingOnes(T Value, ZeroBehavior ZB=ZB_Width)
Count the number of ones from the least significant bit to the first zero bit.
bool isNullOrNullSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
@ Z
zlib style compression
bool is_splat(R &&Range)
Wrapper function around std::equal to detect if all elements in a container are the same.
void narrowShuffleMaskElts(int Scale, ArrayRef< int > Mask, SmallVectorImpl< int > &ScaledMask)
Replace each shuffle mask index with the scaled sequential indices for an equivalent mask of narrowed...
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
unsigned M0(unsigned Val)
std::enable_if_t<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type > cast(const Y &Val)
ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
constexpr unsigned BitWidth
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
Align commonAlignment(Align A, Align B)
Returns the alignment that satisfies both alignments.
unsigned Log2(Align A)
Returns the log2 of the alignment.
bool operator<=(int64_t V1, const APSInt &V2)
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
int getSplatIndex(ArrayRef< int > Mask)
If all non-negative Mask elements are the same value, return that value.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
AAMDNodes concat(const AAMDNodes &Other) const
Determine the best AAMDNodes after concatenating two different locations together.
static constexpr roundingMode rmNearestTiesToEven
static unsigned int semanticsPrecision(const fltSemantics &)
opStatus
IEEE-754R 7: Default exception handling.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Represent subnormal handling kind for floating point instruction inputs and outputs.
static constexpr DenormalMode getIEEE()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
Helper struct to store a base, index and offset that forms an address.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known ...
bool isAllOnes() const
Returns true if value is all one bits.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
MachinePointerInfo getWithOffset(int64_t O) const
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
These are IR-level optimization flags that may be propagated to SDNodes.
void setNoUnsignedWrap(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Clients of various APIs that cause global effects on the DAG can optionally implement this interface.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
void AddToWorklist(SDNode *N)
bool recursivelyDeleteUnusedNodes(SDNode *N)
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...